net/ixgbe: fix flow control status
dpdk.git: drivers/net/ixgbe/ixgbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
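
/*
 * Worked example (illustrative, not used by the driver): with the defaults
 * above, the XOFF high watermark is 0x80 * 1024 = 131072 bytes (128 KB) and
 * the XON low watermark is 0x40 * 1024 = 65536 bytes (64 KB). The pause
 * timer carried in XOFF frames is 0x680 = 1664 pause quanta (IEEE 802.3x
 * defines one quantum as 512 bit times).
 */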

/* Default value of max Rx queues */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX
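
/*
 * Worked example (illustrative): CHAR_BIT is 8, so IXGBE_4_BIT_WIDTH is
 * 8 / 2 = 4 and IXGBE_4_BIT_MASK expands to RTE_LEN2MASK(4, uint8_t) = 0x0f,
 * i.e. the low nibble; IXGBE_8_BIT_MASK is simply UINT8_MAX = 0xff.
 */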

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
#define IXGBE_ETAG_ETYPE                       0x00005084
#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
#define IXGBE_RAH_ADTYPE                       0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG                    0x00000004
#define IXGBE_VTEICR_MASK                      0x07

#define IXGBE_EXVET_VET_EXT_SHIFT              16
#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK           "pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
        IXGBEVF_DEVARG_PFLINK_FULLCHK,
        NULL
};
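
/*
 * Usage sketch (illustrative): the VF devarg above is passed on the EAL
 * command line together with the device, e.g.
 *   testpmd -w af:10.0,pflink_fullchk=1 -- -i
 * where "af:10.0" stands for the VF's PCI address; the value is parsed via
 * rte_kvargs against ixgbevf_valid_arguments during VF init.
 */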

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
        unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                                 size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                               enum rte_vlan_type vlan_type,
                               uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
                                                  int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev,
                                              uint32_t timeout_ms);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mac_addr,
                        uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
                                struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
                rte_ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                             struct rte_ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                                   struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
                            struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
                                   struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp,
                                            uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                   const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_l2_tunnel_eth_type_conf
        (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
        (struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf *l2_tunnel,
         uint32_t mask,
         uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
                                             enum rte_filter_op filter_op,
                                             void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Define VF stats macros for registers that are not "clear on read"
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        uint32_t latest = IXGBE_READ_REG(hw, reg);              \
        cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
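
/*
 * Worked example (illustrative): the 36-bit counters wrap at 0x1000000000
 * (2^36). If last = 0xFFFFFFFF0 and latest = 0x10 after a wrap, then
 * (0x1000000000 + 0x10 - 0xFFFFFFFF0) & 0xFFFFFFFFF = 0x20, the correct
 * delta across the wrap; a plain subtraction would underflow.
 */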

#define IXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
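
/*
 * Worked example (illustrative, assuming 32-bit bitmap words; NBBY is the
 * number of bits per byte, 8): queue 37 maps to idx = 37 / 32 = 1 and
 * bit = 37 % 32 = 5, so IXGBE_SET_HWSTRIP(h, 37) sets bit 5 of bitmap[1].
 */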

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
        { .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_TXD_ALIGN,
        .nb_seg_max = IXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
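
/*
 * Illustrative note: these limits are exported via dev_infos_get, so a
 * requested ring size is expected to lie within
 * [IXGBE_MIN_RING_DESC, IXGBE_MAX_RING_DESC] and be a multiple of the
 * IXGBE_RXD_ALIGN/IXGBE_TXD_ALIGN alignment; applications can use
 * rte_eth_dev_adjust_nb_rx_tx_desc() to clamp their values accordingly.
 */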

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_set_link_up      = ixgbe_dev_set_link_up,
        .dev_set_link_down    = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .dev_reset            = ixgbe_dev_reset,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .xstats_get           = ixgbe_dev_xstats_get,
        .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .xstats_get_names     = ixgbe_dev_xstats_get_names,
        .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .fw_version_get       = ixgbe_fw_version_get,
        .dev_infos_get        = ixgbe_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
        .rx_queue_start       = ixgbe_dev_rx_queue_start,
        .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .mac_addr_set         = ixgbe_set_default_mac_addr,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
        .mirror_rule_set      = ixgbe_mirror_rule_set,
        .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
        .get_module_info      = ixgbe_get_module_info,
        .get_module_eeprom    = ixgbe_get_module_eeprom,
        .get_dcb_info         = ixgbe_dev_get_dcb_info,
        .timesync_adjust_time = ixgbe_timesync_adjust_time,
        .timesync_read_time   = ixgbe_timesync_read_time,
        .timesync_write_time  = ixgbe_timesync_write_time,
        .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
        .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
        .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
        .tm_ops_get           = ixgbe_tm_ops_get,
        .tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
};
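
/*
 * Illustrative application-side sketch (not part of the driver): the
 * generic ethdev API dispatches through the ops table above, e.g.
 * rte_eth_dev_flow_ctrl_get() lands in ixgbe_flow_ctrl_get() for ixgbe
 * ports. Kept under #if 0 so it is never compiled.
 */
#if 0
static void
example_query_flow_ctrl(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0)
                printf("port %u flow control mode: %d\n",
                       port_id, (int)fc_conf.mode);
}
#endif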

/*
 * dev_ops for the virtual function; only the bare necessities for basic
 * VF operation are implemented.
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbevf_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
        .dev_reset            = ixgbevf_dev_reset,
        .promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
        {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
        {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
        {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
        {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
        {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
        {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
        {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
        {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
        {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
        {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                prc1023)},
        {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                prc1522)},
        {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
        {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
        {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
        {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
        {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
        {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
        {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
        {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
        {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
        {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
        {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
        {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
        {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
        {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
        {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
        {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                ptc1023)},
        {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                ptc1522)},
        {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
        {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
        {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
        {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

        {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_add)},
        {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_remove)},
        {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fadd)},
        {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fremove)},
        {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
                fdirmatch)},
        {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
                fdirmiss)},

        {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
        {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
        {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
                fclast)},
        {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
        {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
        {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
        {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
        {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
                fcoe_noddp)},
        {"rx_fcoe_no_direct_data_placement_ext_buff",
                offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

        {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxontxc)},
        {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxonrxc)},
        {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxofftxc)},
        {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxoffrxc)},
        {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
                           sizeof(rte_ixgbe_stats_strings[0]))
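
/*
 * Illustrative sketch (an assumption mirroring the usual consumption of
 * such offsetof() tables): an xstat value is read by applying the recorded
 * byte offset to the hardware stats structure. Kept under #if 0 so it is
 * never compiled.
 */
#if 0
static uint64_t
example_read_hw_xstat(const struct ixgbe_hw_stats *hw_stats, unsigned int i)
{
        return *(const uint64_t *)((const char *)hw_stats +
                        rte_ixgbe_stats_strings[i].offset);
}
#endif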

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
        {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                out_pkts_untagged)},
        {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_pkts_encrypted)},
        {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
                out_pkts_protected)},
        {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_octets_encrypted)},
        {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
                out_octets_protected)},
        {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                in_pkts_untagged)},
        {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
                in_pkts_badtag)},
        {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_nosci)},
        {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unknownsci)},
        {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
                in_octets_decrypted)},
        {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
                in_octets_validated)},
        {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unchecked)},
        {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
                in_pkts_delayed)},
        {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
                in_pkts_late)},
        {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
                in_pkts_ok)},
        {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_invalid)},
        {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notvalid)},
        {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unusedsa)},
        {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
                           sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
        {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
        {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
                           sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
        {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
                pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
                           sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
                sizeof(rte_ixgbevf_stats_strings[0]))

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = ixgbe_reset_hw(hw);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        if (status == IXGBE_ERR_SFP_NOT_PRESENT)
                status = IXGBE_SUCCESS;
        return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}

static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
                (hw->mac.type != ixgbe_mac_X550EM_x) &&
                (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}
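
/*
 * Worked example (illustrative): queue_id = 5 gives n = 5 / 4 = 1 and
 * offset = 5 % 4 = 1, so the stat_idx value (masked to 4 bits) is written
 * into the second 8-bit field, bits 15:8, of RQSMR[1] for Rx, or TQSM[1]
 * for Tx.
 */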

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
        dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;

        /* we only support 4 TCs for X540, X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
                hw->mac.type == ixgbe_mac_X550EM_x ||
                hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
}
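
/*
 * Worked arithmetic (illustrative): with 8 traffic classes,
 * 100 / 8 = 12 percent per TC, and the "+ (i & 1)" term adds 1 for each of
 * the four odd-numbered TCs, so the bandwidth shares sum to
 * 8 * 12 + 4 = 100 percent.
 */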

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
        uint16_t mask;

        /*
         * The PHY lock should not fail at this early stage; if it does, it
         * is due to an improper exit of the application. So force the
         * release of the faulty lock. Release of the common lock is done
         * automatically by the swfw_sync function.
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);

        /*
         * These locks are trickier since they are common to all ports; but
         * the swfw_sync retries last long enough (1 s) to make it almost
         * certain that a lock which cannot be taken is held because of an
         * improperly released semaphore.
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct ixgbe_adapter *ad = eth_dev->data->dev_private;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        struct ixgbe_bw_conf *bw_conf =
                IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

        ixgbe_dev_macsec_setting_reset(eth_dev);

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check that we don't need
         * a different RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ixgbe_tx_queue *txq;
                /* The Tx queue function in the primary process was set by
                 * the last queue initialized; Tx queues may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
                        ixgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                                     "Using default TX function.");
                }

                ixgbe_set_rx_function(eth_dev);

                return 0;
        }
1121
1122         rte_atomic32_clear(&ad->link_thread_running);
1123         rte_eth_copy_pci_info(eth_dev, pci_dev);
1124
1125         /* Vendor and Device ID need to be set before init of shared code */
1126         hw->device_id = pci_dev->id.device_id;
1127         hw->vendor_id = pci_dev->id.vendor_id;
1128         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1129         hw->allow_unsupported_sfp = 1;
1130
1131         /* Initialize the shared code (base driver) */
1132 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1133         diag = ixgbe_bypass_init_shared_code(hw);
1134 #else
1135         diag = ixgbe_init_shared_code(hw);
1136 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1137
1138         if (diag != IXGBE_SUCCESS) {
1139                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1140                 return -EIO;
1141         }
1142
1143         if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
1144                 PMD_INIT_LOG(ERR, "\nERROR: "
1145                         "Firmware recovery mode detected. Limiting functionality.\n"
1146                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1147                         "User Guide for details on firmware recovery mode.");
1148                 return -EIO;
1149         }
1150
1151         /* pick up the PCI bus settings for reporting later */
1152         ixgbe_get_bus_info(hw);
1153
1154         /* Unlock any pending hardware semaphore */
1155         ixgbe_swfw_lock_reset(hw);
1156
1157 #ifdef RTE_LIBRTE_SECURITY
1158         /* Initialize security_ctx only for primary process */
1159         if (ixgbe_ipsec_ctx_create(eth_dev))
1160                 return -ENOMEM;
1161 #endif
1162
1163         /* Initialize DCB configuration */
1164         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1165         ixgbe_dcb_init(hw, dcb_config);
1166         /* Set default Hardware Flow Control settings */
1167         hw->fc.requested_mode = ixgbe_fc_none;
1168         hw->fc.current_mode = ixgbe_fc_none;
1169         hw->fc.pause_time = IXGBE_FC_PAUSE;
1170         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1171                 hw->fc.low_water[i] = IXGBE_FC_LO;
1172                 hw->fc.high_water[i] = IXGBE_FC_HI;
1173         }
1174         hw->fc.send_xon = 1;
1175
1176         /* Make sure we have a good EEPROM before we read from it */
1177         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1178         if (diag != IXGBE_SUCCESS) {
1179                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1180                 return -EIO;
1181         }
1182
1183 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1184         diag = ixgbe_bypass_init_hw(hw);
1185 #else
1186         diag = ixgbe_init_hw(hw);
1187 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1188
1189         /*
1190          * Devices with copper PHYs will fail to initialise if ixgbe_init_hw()
1191          * is called too soon after the kernel driver unbinding/binding occurs.
1192          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1193          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1194          * also called. See ixgbe_identify_phy_82599(). The reason for the
1195          * failure is not known, and it only occurs when virtualisation features
1196          * are disabled in the BIOS. A delay of 100 ms was found to be enough by
1197          * trial-and-error, and is doubled to be safe.
1198          */
1199         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1200                 rte_delay_ms(200);
1201                 diag = ixgbe_init_hw(hw);
1202         }
1203
1204         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1205                 diag = IXGBE_SUCCESS;
1206
1207         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1208                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1209                              "LOM.  Please be aware there may be issues associated "
1210                              "with your hardware.");
1211                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1212                              "please contact your Intel or hardware representative "
1213                              "who provided you with this hardware.");
1214         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1215                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1216         if (diag) {
1217                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1218                 return -EIO;
1219         }
1220
1221         /* Reset the hw statistics */
1222         ixgbe_dev_stats_reset(eth_dev);
1223
1224         /* disable interrupt */
1225         ixgbe_disable_intr(hw);
1226
1227         /* reset mappings for queue statistics hw counters */
1228         ixgbe_reset_qstat_mappings(hw);
1229
1230         /* Allocate memory for storing MAC addresses */
1231         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
1232                                                hw->mac.num_rar_entries, 0);
1233         if (eth_dev->data->mac_addrs == NULL) {
1234                 PMD_INIT_LOG(ERR,
1235                              "Failed to allocate %u bytes needed to store "
1236                              "MAC addresses",
1237                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1238                 return -ENOMEM;
1239         }
1240         /* Copy the permanent MAC address */
1241         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1242                         &eth_dev->data->mac_addrs[0]);
1243
1244         /* Allocate memory for storing hash filter MAC addresses */
1245         eth_dev->data->hash_mac_addrs = rte_zmalloc(
1246                 "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
1247         if (eth_dev->data->hash_mac_addrs == NULL) {
1248                 PMD_INIT_LOG(ERR,
1249                              "Failed to allocate %d bytes needed to store MAC addresses",
1250                              RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1251                 return -ENOMEM;
1252         }
1253
1254         /* Tell rte_eth_dev_close() that it should also release the
1255          * private port resources.
1256          */
1257         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1258
1259         /* initialize the vfta */
1260         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1261
1262         /* initialize the hw strip bitmap */
1263         memset(hwstrip, 0, sizeof(*hwstrip));
1264
1265         /* initialize PF if max_vfs not zero */
1266         ixgbe_pf_host_init(eth_dev);
1267
1268         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1269         /* let hardware know driver is loaded */
1270         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1271         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1272         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1273         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1274         IXGBE_WRITE_FLUSH(hw);
1275
1276         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1277                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1278                              (int) hw->mac.type, (int) hw->phy.type,
1279                              (int) hw->phy.sfp_type);
1280         else
1281                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1282                              (int) hw->mac.type, (int) hw->phy.type);
1283
1284         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1285                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1286                      pci_dev->id.device_id);
1287
1288         rte_intr_callback_register(intr_handle,
1289                                    ixgbe_dev_interrupt_handler, eth_dev);
1290
1291         /* enable uio/vfio intr/eventfd mapping */
1292         rte_intr_enable(intr_handle);
1293
1294         /* enable support intr */
1295         ixgbe_enable_intr(eth_dev);
1296
1297         /* initialize filter info */
1298         memset(filter_info, 0,
1299                sizeof(struct ixgbe_filter_info));
1300
1301         /* initialize 5tuple filter list */
1302         TAILQ_INIT(&filter_info->fivetuple_list);
1303
1304         /* initialize flow director filter list & hash */
1305         ixgbe_fdir_filter_init(eth_dev);
1306
1307         /* initialize l2 tunnel filter list & hash */
1308         ixgbe_l2_tn_filter_init(eth_dev);
1309
1310         /* initialize flow filter lists */
1311         ixgbe_filterlist_init();
1312
1313         /* initialize bandwidth configuration info */
1314         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1315
1316         /* initialize Traffic Manager configuration */
1317         ixgbe_tm_conf_init(eth_dev);
1318
1319         return 0;
1320 }
1321
1322 static int
1323 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1324 {
1325         PMD_INIT_FUNC_TRACE();
1326
1327         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1328                 return 0;
1329
1330         ixgbe_dev_close(eth_dev);
1331
1332         return 0;
1333 }
1334
1335 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1336 {
1337         struct ixgbe_filter_info *filter_info =
1338                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1339         struct ixgbe_5tuple_filter *p_5tuple;
1340
1341         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1342                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1343                              p_5tuple,
1344                              entries);
1345                 rte_free(p_5tuple);
1346         }
1347         memset(filter_info->fivetuple_mask, 0,
1348                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1349
1350         return 0;
1351 }
1352
1353 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1354 {
1355         struct ixgbe_hw_fdir_info *fdir_info =
1356                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1357         struct ixgbe_fdir_filter *fdir_filter;
1358
1359         if (fdir_info->hash_map)
1360                 rte_free(fdir_info->hash_map);
1361         if (fdir_info->hash_handle)
1362                 rte_hash_free(fdir_info->hash_handle);
1363
1364         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1365                 TAILQ_REMOVE(&fdir_info->fdir_list,
1366                              fdir_filter,
1367                              entries);
1368                 rte_free(fdir_filter);
1369         }
1370
1371         return 0;
1372 }
1373
1374 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1375 {
1376         struct ixgbe_l2_tn_info *l2_tn_info =
1377                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1378         struct ixgbe_l2_tn_filter *l2_tn_filter;
1379
1380         if (l2_tn_info->hash_map)
1381                 rte_free(l2_tn_info->hash_map);
1382         if (l2_tn_info->hash_handle)
1383                 rte_hash_free(l2_tn_info->hash_handle);
1384
1385         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1386                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1387                              l2_tn_filter,
1388                              entries);
1389                 rte_free(l2_tn_filter);
1390         }
1391
1392         return 0;
1393 }
1394
1395 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1396 {
1397         struct ixgbe_hw_fdir_info *fdir_info =
1398                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1399         char fdir_hash_name[RTE_HASH_NAMESIZE];
1400         struct rte_hash_parameters fdir_hash_params = {
1401                 .name = fdir_hash_name,
1402                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1403                 .key_len = sizeof(union ixgbe_atr_input),
1404                 .hash_func = rte_hash_crc,
1405                 .hash_func_init_val = 0,
1406                 .socket_id = rte_socket_id(),
1407         };
1408
1409         TAILQ_INIT(&fdir_info->fdir_list);
1410         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1411                  "fdir_%s", eth_dev->device->name);
1412         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1413         if (!fdir_info->hash_handle) {
1414                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1415                 return -EINVAL;
1416         }
1417         fdir_info->hash_map = rte_zmalloc("ixgbe",
1418                                           sizeof(struct ixgbe_fdir_filter *) *
1419                                           IXGBE_MAX_FDIR_FILTER_NUM,
1420                                           0);
1421         if (!fdir_info->hash_map) {
1422                 PMD_INIT_LOG(ERR,
1423                              "Failed to allocate memory for fdir hash map!");
1424                 return -ENOMEM;
1425         }
1426         fdir_info->mask_added = FALSE;
1427
1428         return 0;
1429 }
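
/*
 * Illustrative note (not part of the driver logic): elsewhere in the
 * driver, lookups into this table take a full union ixgbe_atr_input as
 * the key, roughly:
 *
 *     int pos = rte_hash_lookup(fdir_info->hash_handle, &input);
 *     if (pos >= 0)
 *             filter = fdir_info->hash_map[pos];
 *
 * i.e. the rte_hash position doubles as the index into hash_map.
 */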
1430
1431 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1432 {
1433         struct ixgbe_l2_tn_info *l2_tn_info =
1434                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1435         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1436         struct rte_hash_parameters l2_tn_hash_params = {
1437                 .name = l2_tn_hash_name,
1438                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1439                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1440                 .hash_func = rte_hash_crc,
1441                 .hash_func_init_val = 0,
1442                 .socket_id = rte_socket_id(),
1443         };
1444
1445         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1446         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1447                  "l2_tn_%s", eth_dev->device->name);
1448         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1449         if (!l2_tn_info->hash_handle) {
1450                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1451                 return -EINVAL;
1452         }
1453         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1454                                    sizeof(struct ixgbe_l2_tn_filter *) *
1455                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1456                                    0);
1457         if (!l2_tn_info->hash_map) {
1458                 PMD_INIT_LOG(ERR,
1459                         "Failed to allocate memory for L2 TN hash map!");
1460                 return -ENOMEM;
1461         }
1462         l2_tn_info->e_tag_en = FALSE;
1463         l2_tn_info->e_tag_fwd_en = FALSE;
1464         l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
1465
1466         return 0;
1467 }
1468 /*
1469  * Negotiate mailbox API version with the PF.
1470  * After reset the API version is always set to the basic one (ixgbe_mbox_api_10).
1471  * We then try to negotiate, starting with the most recent version.
1472  * If all negotiation attempts fail, we proceed with
1473  * the default one (ixgbe_mbox_api_10).
1474  */
1475 static void
1476 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1477 {
1478         int32_t i;
1479
1480         /* start with highest supported, proceed down */
1481         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1482                 ixgbe_mbox_api_13,
1483                 ixgbe_mbox_api_12,
1484                 ixgbe_mbox_api_11,
1485                 ixgbe_mbox_api_10,
1486         };
1487
1488         for (i = 0;
1489                         i != RTE_DIM(sup_ver) &&
1490                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1491                         i++)
1492                 ;
1493 }
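
/*
 * Illustrative note: the loop above is equivalent to trying
 * ixgbe_mbox_api_13, _12, _11 and _10 in turn and stopping at the first
 * version the PF accepts. If even ixgbe_mbox_api_10 is rejected, the
 * mailbox simply stays at the post-reset default, ixgbe_mbox_api_10.
 */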
1494
1495 static void
1496 generate_random_mac_addr(struct rte_ether_addr *mac_addr)
1497 {
1498         uint64_t random;
1499
1500         /* Set Organizationally Unique Identifier (OUI) prefix. */
1501         mac_addr->addr_bytes[0] = 0x00;
1502         mac_addr->addr_bytes[1] = 0x09;
1503         mac_addr->addr_bytes[2] = 0xC0;
1504         /* Force indication of locally assigned MAC address. */
1505         mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
1506         /* Generate the last 3 bytes of the MAC address with a random number. */
1507         random = rte_rand();
1508         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1509 }
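
/*
 * Illustrative note: byte 0 above becomes 0x00 | RTE_ETHER_LOCAL_ADMIN_ADDR
 * (0x02), so the generated addresses have the form 02:09:C0:xx:xx:xx,
 * with the last three bytes taken from rte_rand().
 */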
1510
1511 static int
1512 devarg_handle_int(__rte_unused const char *key, const char *value,
1513                   void *extra_args)
1514 {
1515         uint16_t *n = extra_args;
1516
1517         if (value == NULL || extra_args == NULL)
1518                 return -EINVAL;
1519
1520         *n = (uint16_t)strtoul(value, NULL, 0);
1521         if (*n == USHRT_MAX && errno == ERANGE)
1522                 return -1;
1523
1524         return 0;
1525 }
1526
1527 static void
1528 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter,
1529                       struct rte_devargs *devargs)
1530 {
1531         struct rte_kvargs *kvlist;
1532         uint16_t pflink_fullchk;
1533
1534         if (devargs == NULL)
1535                 return;
1536
1537         kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments);
1538         if (kvlist == NULL)
1539                 return;
1540
1541         if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 &&
1542             rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK,
1543                                devarg_handle_int, &pflink_fullchk) == 0 &&
1544             pflink_fullchk == 1)
1545                 adapter->pflink_fullchk = 1;
1546
1547         rte_kvargs_free(kvlist);
1548 }
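
/*
 * Usage sketch (the PCI address below is only an example): the devarg is
 * passed when whitelisting the VF on the command line, e.g.
 *
 *     testpmd -w 0000:02:00.1,pflink_fullchk=1 -- -i
 *
 * Any value other than 1 leaves adapter->pflink_fullchk at its default
 * of 0.
 */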
1549
1550 /*
1551  * Virtual Function device init
1552  */
1553 static int
1554 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1555 {
1556         int diag;
1557         uint32_t tc, tcs;
1558         struct ixgbe_adapter *ad = eth_dev->data->dev_private;
1559         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1560         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1561         struct ixgbe_hw *hw =
1562                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1563         struct ixgbe_vfta *shadow_vfta =
1564                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1565         struct ixgbe_hwstrip *hwstrip =
1566                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1567         struct rte_ether_addr *perm_addr =
1568                 (struct rte_ether_addr *)hw->mac.perm_addr;
1569
1570         PMD_INIT_FUNC_TRACE();
1571
1572         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1573         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1574         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1575
1576         /* For secondary processes, we don't initialise any further, as the
1577          * primary has already done this work. Only check whether we need a
1578          * different RX function.
1579          */
1580         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1581                 struct ixgbe_tx_queue *txq;
1582                 /* TX queue function in primary is set by the last queue
1583                  * initialized; the primary may not have initialized any TX queue.
1584                  */
1585                 if (eth_dev->data->tx_queues) {
1586                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1587                         ixgbe_set_tx_function(eth_dev, txq);
1588                 } else {
1589                         /* Use default TX function if we get here */
1590                         PMD_INIT_LOG(NOTICE,
1591                                      "No TX queues configured yet. Using default TX function.");
1592                 }
1593
1594                 ixgbe_set_rx_function(eth_dev);
1595
1596                 return 0;
1597         }
1598
1599         rte_atomic32_clear(&ad->link_thread_running);
1600         ixgbevf_parse_devargs(eth_dev->data->dev_private,
1601                               pci_dev->device.devargs);
1602
1603         rte_eth_copy_pci_info(eth_dev, pci_dev);
1604
1605         hw->device_id = pci_dev->id.device_id;
1606         hw->vendor_id = pci_dev->id.vendor_id;
1607         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1608
1609         /* initialize the vfta */
1610         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1611
1612         /* initialize the hw strip bitmap */
1613         memset(hwstrip, 0, sizeof(*hwstrip));
1614
1615         /* Initialize the shared code (base driver) */
1616         diag = ixgbe_init_shared_code(hw);
1617         if (diag != IXGBE_SUCCESS) {
1618                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1619                 return -EIO;
1620         }
1621
1622         /* init_mailbox_params */
1623         hw->mbx.ops.init_params(hw);
1624
1625         /* Reset the hw statistics */
1626         ixgbevf_dev_stats_reset(eth_dev);
1627
1628         /* Disable the interrupts for VF */
1629         ixgbevf_intr_disable(eth_dev);
1630
1631         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1632         diag = hw->mac.ops.reset_hw(hw);
1633
1634         /*
1635          * The VF reset operation returns IXGBE_ERR_INVALID_MAC_ADDR when
1636          * the underlying PF driver has not assigned a MAC address to the VF.
1637          * In this case, assign a random MAC address.
1638          */
1639         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1640                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1641                 /*
1642                  * This error code will be propagated to the app by
1643                  * rte_eth_dev_reset, so use a public error code rather than
1644                  * the internal-only IXGBE_ERR_RESET_FAILED
1645                  */
1646                 return -EAGAIN;
1647         }
1648
1649         /* negotiate mailbox API version to use with the PF. */
1650         ixgbevf_negotiate_api(hw);
1651
1652         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1653         ixgbevf_get_queues(hw, &tcs, &tc);
1654
1655         /* Allocate memory for storing MAC addresses */
1656         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
1657                                                hw->mac.num_rar_entries, 0);
1658         if (eth_dev->data->mac_addrs == NULL) {
1659                 PMD_INIT_LOG(ERR,
1660                              "Failed to allocate %u bytes needed to store "
1661                              "MAC addresses",
1662                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1663                 return -ENOMEM;
1664         }
1665
1666         /* Tell rte_eth_dev_close() that it should also release the
1667          * private port resources.
1668          */
1669         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1670
1671         /* Generate a random MAC address, if none was assigned by PF. */
1672         if (rte_is_zero_ether_addr(perm_addr)) {
1673                 generate_random_mac_addr(perm_addr);
1674                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1675                 if (diag) {
1676                         rte_free(eth_dev->data->mac_addrs);
1677                         eth_dev->data->mac_addrs = NULL;
1678                         return diag;
1679                 }
1680                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1681                 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1682                              "%02x:%02x:%02x:%02x:%02x:%02x",
1683                              perm_addr->addr_bytes[0],
1684                              perm_addr->addr_bytes[1],
1685                              perm_addr->addr_bytes[2],
1686                              perm_addr->addr_bytes[3],
1687                              perm_addr->addr_bytes[4],
1688                              perm_addr->addr_bytes[5]);
1689         }
1690
1691         /* Copy the permanent MAC address */
1692         rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1693
1694         /* reset the hardware with the new settings */
1695         diag = hw->mac.ops.start_hw(hw);
1696         switch (diag) {
1697         case  0:
1698                 break;
1699
1700         default:
1701                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1702                 return -EIO;
1703         }
1704
1705         rte_intr_callback_register(intr_handle,
1706                                    ixgbevf_dev_interrupt_handler, eth_dev);
1707         rte_intr_enable(intr_handle);
1708         ixgbevf_intr_enable(eth_dev);
1709
1710         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1711                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1712                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1713
1714         return 0;
1715 }
1716
1717 /* Virtual Function device uninit */
1718
1719 static int
1720 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1721 {
1722         PMD_INIT_FUNC_TRACE();
1723
1724         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1725                 return 0;
1726
1727         ixgbevf_dev_close(eth_dev);
1728
1729         return 0;
1730 }
1731
1732 static int
1733 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1734                 struct rte_pci_device *pci_dev)
1735 {
1736         char name[RTE_ETH_NAME_MAX_LEN];
1737         struct rte_eth_dev *pf_ethdev;
1738         struct rte_eth_devargs eth_da;
1739         int i, retval;
1740
1741         if (pci_dev->device.devargs) {
1742                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
1743                                 &eth_da);
1744                 if (retval)
1745                         return retval;
1746         } else
1747                 memset(&eth_da, 0, sizeof(eth_da));
1748
1749         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
1750                 sizeof(struct ixgbe_adapter),
1751                 eth_dev_pci_specific_init, pci_dev,
1752                 eth_ixgbe_dev_init, NULL);
1753
1754         if (retval || eth_da.nb_representor_ports < 1)
1755                 return retval;
1756
1757         pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1758         if (pf_ethdev == NULL)
1759                 return -ENODEV;
1760
1761         /* probe VF representor ports */
1762         for (i = 0; i < eth_da.nb_representor_ports; i++) {
1763                 struct ixgbe_vf_info *vfinfo;
1764                 struct ixgbe_vf_representor representor;
1765
1766                 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
1767                         pf_ethdev->data->dev_private);
1768                 if (vfinfo == NULL) {
1769                         PMD_DRV_LOG(ERR,
1770                                 "no virtual functions supported by PF");
1771                         break;
1772                 }
1773
1774                 representor.vf_id = eth_da.representor_ports[i];
1775                 representor.switch_domain_id = vfinfo->switch_domain_id;
1776                 representor.pf_ethdev = pf_ethdev;
1777
1778                 /* representor port net_bdf_port */
1779                 snprintf(name, sizeof(name), "net_%s_representor_%d",
1780                         pci_dev->device.name,
1781                         eth_da.representor_ports[i]);
1782
1783                 retval = rte_eth_dev_create(&pci_dev->device, name,
1784                         sizeof(struct ixgbe_vf_representor), NULL, NULL,
1785                         ixgbe_vf_representor_init, &representor);
1786
1787                 if (retval)
1788                         PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
1789                                 "representor %s.", name);
1790         }
1791
1792         return 0;
1793 }
1794
1795 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1796 {
1797         struct rte_eth_dev *ethdev;
1798
1799         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1800         if (!ethdev)
1801                 return 0;
1802
1803         if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1804                 return rte_eth_dev_pci_generic_remove(pci_dev,
1805                                         ixgbe_vf_representor_uninit);
1806         else
1807                 return rte_eth_dev_pci_generic_remove(pci_dev,
1808                                                 eth_ixgbe_dev_uninit);
1809 }
1810
1811 static struct rte_pci_driver rte_ixgbe_pmd = {
1812         .id_table = pci_id_ixgbe_map,
1813         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1814         .probe = eth_ixgbe_pci_probe,
1815         .remove = eth_ixgbe_pci_remove,
1816 };
1817
1818 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1819         struct rte_pci_device *pci_dev)
1820 {
1821         return rte_eth_dev_pci_generic_probe(pci_dev,
1822                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1823 }
1824
1825 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1826 {
1827         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1828 }
1829
1830 /*
1831  * virtual function driver struct
1832  */
1833 static struct rte_pci_driver rte_ixgbevf_pmd = {
1834         .id_table = pci_id_ixgbevf_map,
1835         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1836         .probe = eth_ixgbevf_pci_probe,
1837         .remove = eth_ixgbevf_pci_remove,
1838 };
1839
1840 static int
1841 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1842 {
1843         struct ixgbe_hw *hw =
1844                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1845         struct ixgbe_vfta *shadow_vfta =
1846                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1847         uint32_t vfta;
1848         uint32_t vid_idx;
1849         uint32_t vid_bit;
1850
1851         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1852         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1853         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1854         if (on)
1855                 vfta |= vid_bit;
1856         else
1857                 vfta &= ~vid_bit;
1858         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1859
1860         /* update local VFTA copy */
1861         shadow_vfta->vfta[vid_idx] = vfta;
1862
1863         return 0;
1864 }
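
/*
 * Worked example of the VFTA indexing above: for vlan_id = 100,
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) =
 * 1 << 4, so bit 4 of VFTA register 3 is set or cleared.
 */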
1865
1866 static void
1867 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1868 {
1869         if (on)
1870                 ixgbe_vlan_hw_strip_enable(dev, queue);
1871         else
1872                 ixgbe_vlan_hw_strip_disable(dev, queue);
1873 }
1874
1875 static int
1876 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1877                     enum rte_vlan_type vlan_type,
1878                     uint16_t tpid)
1879 {
1880         struct ixgbe_hw *hw =
1881                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1882         int ret = 0;
1883         uint32_t reg;
1884         uint32_t qinq;
1885
1886         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1887         qinq &= IXGBE_DMATXCTL_GDV;
1888
1889         switch (vlan_type) {
1890         case ETH_VLAN_TYPE_INNER:
1891                 if (qinq) {
1892                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1893                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1894                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1895                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1896                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1897                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1898                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1899                 } else {
1900                         ret = -ENOTSUP;
1901                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1902                                     " by single VLAN");
1903                 }
1904                 break;
1905         case ETH_VLAN_TYPE_OUTER:
1906                 if (qinq) {
1907                         /* Only the high 16 bits are valid */
1908                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1909                                         IXGBE_EXVET_VET_EXT_SHIFT);
1910                 } else {
1911                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1912                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1913                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1914                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1915                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1916                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1917                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1918                 }
1919
1920                 break;
1921         default:
1922                 ret = -EINVAL;
1923                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1924                 break;
1925         }
1926
1927         return ret;
1928 }
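
/*
 * Usage sketch (illustrative; applications use the generic ethdev API
 * rather than this entry point directly): selecting the 0x88A8 S-tag as
 * the outer TPID for QinQ would look like
 *
 *     rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
 *                                     0x88A8);
 *
 * which lands in this handler. Without DMATXCTL.GDV (QinQ) enabled,
 * ETH_VLAN_TYPE_INNER returns -ENOTSUP, as coded above.
 */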
1929
1930 void
1931 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1932 {
1933         struct ixgbe_hw *hw =
1934                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1935         uint32_t vlnctrl;
1936
1937         PMD_INIT_FUNC_TRACE();
1938
1939         /* Filter Table Disable */
1940         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1941         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1942
1943         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1944 }
1945
1946 void
1947 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1948 {
1949         struct ixgbe_hw *hw =
1950                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1951         struct ixgbe_vfta *shadow_vfta =
1952                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1953         uint32_t vlnctrl;
1954         uint16_t i;
1955
1956         PMD_INIT_FUNC_TRACE();
1957
1958         /* Filter Table Enable */
1959         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1960         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1961         vlnctrl |= IXGBE_VLNCTRL_VFE;
1962
1963         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1964
1965         /* write whatever is in local vfta copy */
1966         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1967                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1968 }
1969
1970 static void
1971 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1972 {
1973         struct ixgbe_hwstrip *hwstrip =
1974                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1975         struct ixgbe_rx_queue *rxq;
1976
1977         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1978                 return;
1979
1980         if (on)
1981                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1982         else
1983                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1984
1985         if (queue >= dev->data->nb_rx_queues)
1986                 return;
1987
1988         rxq = dev->data->rx_queues[queue];
1989
1990         if (on) {
1991                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1992                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1993         } else {
1994                 rxq->vlan_flags = PKT_RX_VLAN;
1995                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1996         }
1997 }
1998
1999 static void
2000 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
2001 {
2002         struct ixgbe_hw *hw =
2003                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2004         uint32_t ctrl;
2005
2006         PMD_INIT_FUNC_TRACE();
2007
2008         if (hw->mac.type == ixgbe_mac_82598EB) {
2009                 /* No queue-level support */
2010                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2011                 return;
2012         }
2013
2014         /* On other 10G NICs, VLAN strip can be set up per queue via RXDCTL */
2015         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2016         ctrl &= ~IXGBE_RXDCTL_VME;
2017         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2018
2019         /* record the per-queue HW strip setting */
2020         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
2021 }
2022
2023 static void
2024 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
2025 {
2026         struct ixgbe_hw *hw =
2027                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2028         uint32_t ctrl;
2029
2030         PMD_INIT_FUNC_TRACE();
2031
2032         if (hw->mac.type == ixgbe_mac_82598EB) {
2033                 /* No queue-level support */
2034                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2035                 return;
2036         }
2037
2038         /* On other 10G NICs, VLAN strip can be set up per queue via RXDCTL */
2039         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2040         ctrl |= IXGBE_RXDCTL_VME;
2041         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2042
2043         /* record the per-queue HW strip setting */
2044         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2045 }
2046
2047 static void
2048 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2049 {
2050         struct ixgbe_hw *hw =
2051                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2052         uint32_t ctrl;
2053
2054         PMD_INIT_FUNC_TRACE();
2055
2056         /* DMATXCTRL: Generic Double VLAN Disable */
2057         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2058         ctrl &= ~IXGBE_DMATXCTL_GDV;
2059         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2060
2061         /* CTRL_EXT: Global Double VLAN Disable */
2062         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2063         ctrl &= ~IXGBE_EXTENDED_VLAN;
2064         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2065
2066 }
2067
2068 static void
2069 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2070 {
2071         struct ixgbe_hw *hw =
2072                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2073         uint32_t ctrl;
2074
2075         PMD_INIT_FUNC_TRACE();
2076
2077         /* DMATXCTRL: Generic Double VLAN Enable */
2078         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2079         ctrl |= IXGBE_DMATXCTL_GDV;
2080         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2081
2082         /* CTRL_EXT: Global Double VLAN Enable */
2083         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2084         ctrl |= IXGBE_EXTENDED_VLAN;
2085         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2086
2087         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2088         if (hw->mac.type == ixgbe_mac_X550 ||
2089             hw->mac.type == ixgbe_mac_X550EM_x ||
2090             hw->mac.type == ixgbe_mac_X550EM_a) {
2091                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2092                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2093                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2094         }
2095
2096         /*
2097          * The VET EXT field in the EXVET register is 0x8100 by default, so
2098          * no need to change it. The same applies to the VT field of DMATXCTL.
2099          */
2100 }
2101
2102 void
2103 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
2104 {
2105         struct ixgbe_hw *hw =
2106                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2107         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2108         uint32_t ctrl;
2109         uint16_t i;
2110         struct ixgbe_rx_queue *rxq;
2111         bool on;
2112
2113         PMD_INIT_FUNC_TRACE();
2114
2115         if (hw->mac.type == ixgbe_mac_82598EB) {
2116                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2117                         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2118                         ctrl |= IXGBE_VLNCTRL_VME;
2119                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2120                 } else {
2121                         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2122                         ctrl &= ~IXGBE_VLNCTRL_VME;
2123                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2124                 }
2125         } else {
2126                 /*
2127                  * On other 10G NICs, VLAN strip can be set up
2128                  * per queue via RXDCTL.
2129                  */
2130                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2131                         rxq = dev->data->rx_queues[i];
2132                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2133                         if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2134                                 ctrl |= IXGBE_RXDCTL_VME;
2135                                 on = TRUE;
2136                         } else {
2137                                 ctrl &= ~IXGBE_RXDCTL_VME;
2138                                 on = FALSE;
2139                         }
2140                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2141
2142                         /* record the per-queue HW strip setting */
2143                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
2144                 }
2145         }
2146 }
2147
2148 static void
2149 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
2150 {
2151         uint16_t i;
2152         struct rte_eth_rxmode *rxmode;
2153         struct ixgbe_rx_queue *rxq;
2154
2155         if (mask & ETH_VLAN_STRIP_MASK) {
2156                 rxmode = &dev->data->dev_conf.rxmode;
2157                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2158                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2159                                 rxq = dev->data->rx_queues[i];
2160                                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2161                         }
2162                 else
2163                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2164                                 rxq = dev->data->rx_queues[i];
2165                                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2166                         }
2167         }
2168 }
2169
2170 static int
2171 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2172 {
2173         struct rte_eth_rxmode *rxmode;
2174         rxmode = &dev->data->dev_conf.rxmode;
2175
2176         if (mask & ETH_VLAN_STRIP_MASK) {
2177                 ixgbe_vlan_hw_strip_config(dev);
2178         }
2179
2180         if (mask & ETH_VLAN_FILTER_MASK) {
2181                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2182                         ixgbe_vlan_hw_filter_enable(dev);
2183                 else
2184                         ixgbe_vlan_hw_filter_disable(dev);
2185         }
2186
2187         if (mask & ETH_VLAN_EXTEND_MASK) {
2188                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2189                         ixgbe_vlan_hw_extend_enable(dev);
2190                 else
2191                         ixgbe_vlan_hw_extend_disable(dev);
2192         }
2193
2194         return 0;
2195 }
2196
2197 static int
2198 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2199 {
2200         ixgbe_config_vlan_strip_on_all_queues(dev, mask);
2201
2202         ixgbe_vlan_offload_config(dev, mask);
2203
2204         return 0;
2205 }
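
/*
 * Usage sketch (illustrative): applications normally reach this handler
 * through the generic API, e.g. to enable both stripping and filtering:
 *
 *     rte_eth_dev_set_vlan_offload(port_id,
 *             ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
 *
 * The generic layer updates dev_conf.rxmode.offloads from the mask
 * before invoking this callback.
 */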
2206
2207 static void
2208 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2209 {
2210         struct ixgbe_hw *hw =
2211                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2212         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2213         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2214
2215         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2216         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2217 }
2218
2219 static int
2220 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2221 {
2222         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2223
2224         switch (nb_rx_q) {
2225         case 1:
2226         case 2:
2227                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2228                 break;
2229         case 4:
2230                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2231                 break;
2232         default:
2233                 return -EINVAL;
2234         }
2235
2236         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
2237                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2238         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
2239                 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2240         return 0;
2241 }
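
/*
 * Worked example of the pool math above: with nb_rx_q = 2 the device is
 * carved into ETH_64_POOLS, so nb_q_per_pool = 128 / 64 = 2; assuming
 * max_vfs = 16, the PF's default pool then starts at queue index
 * 16 * 2 = 32.
 */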
2242
2243 static int
2244 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2245 {
2246         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2247         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2248         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2249         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2250
2251         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2252                 /* check multi-queue mode */
2253                 switch (dev_conf->rxmode.mq_mode) {
2254                 case ETH_MQ_RX_VMDQ_DCB:
2255                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2256                         break;
2257                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2258                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2259                         PMD_INIT_LOG(ERR, "SRIOV active,"
2260                                         " unsupported mq_mode rx %d.",
2261                                         dev_conf->rxmode.mq_mode);
2262                         return -EINVAL;
2263                 case ETH_MQ_RX_RSS:
2264                 case ETH_MQ_RX_VMDQ_RSS:
2265                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2266                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2267                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2268                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2269                                                 " invalid queue number"
2270                                                 " for VMDQ RSS, allowed"
2271                                                 " value are 1, 2 or 4.");
2272                                         return -EINVAL;
2273                                 }
2274                         break;
2275                 case ETH_MQ_RX_VMDQ_ONLY:
2276                 case ETH_MQ_RX_NONE:
2277                         /* if no mq mode was configured, use the default scheme */
2278                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2279                         break;
2280                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
2281                         /* SRIOV only works in VMDq-enabled mode */
2282                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2283                                         " wrong mq_mode rx %d.",
2284                                         dev_conf->rxmode.mq_mode);
2285                         return -EINVAL;
2286                 }
2287
2288                 switch (dev_conf->txmode.mq_mode) {
2289                 case ETH_MQ_TX_VMDQ_DCB:
2290                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2291                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2292                         break;
2293                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2294                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2295                         break;
2296                 }
2297
2298                 /* check valid queue number */
2299                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2300                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2301                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2302                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2303                                         " must be less than or equal to %d.",
2304                                         nb_rx_q, nb_tx_q,
2305                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2306                         return -EINVAL;
2307                 }
2308         } else {
2309                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2310                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2311                                           " not supported.");
2312                         return -EINVAL;
2313                 }
2314                 /* check configuration for vmdq+dcb mode */
2315                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2316                         const struct rte_eth_vmdq_dcb_conf *conf;
2317
2318                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2319                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2320                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2321                                 return -EINVAL;
2322                         }
2323                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2324                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2325                                conf->nb_queue_pools == ETH_32_POOLS)) {
2326                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2327                                                 " nb_queue_pools must be %d or %d.",
2328                                                 ETH_16_POOLS, ETH_32_POOLS);
2329                                 return -EINVAL;
2330                         }
2331                 }
2332                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2333                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2334
2335                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2336                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2337                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2338                                 return -EINVAL;
2339                         }
2340                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2341                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2342                                conf->nb_queue_pools == ETH_32_POOLS)) {
2343                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2344                                                 " nb_queue_pools != %d and"
2345                                                 " nb_queue_pools != %d.",
2346                                                 ETH_16_POOLS, ETH_32_POOLS);
2347                                 return -EINVAL;
2348                         }
2349                 }
2350
2351                 /* For DCB mode check our configuration before we go further */
2352                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2353                         const struct rte_eth_dcb_rx_conf *conf;
2354
2355                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2356                         if (!(conf->nb_tcs == ETH_4_TCS ||
2357                                conf->nb_tcs == ETH_8_TCS)) {
2358                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2359                                                 " and nb_tcs != %d.",
2360                                                 ETH_4_TCS, ETH_8_TCS);
2361                                 return -EINVAL;
2362                         }
2363                 }
2364
2365                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2366                         const struct rte_eth_dcb_tx_conf *conf;
2367
2368                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2369                         if (!(conf->nb_tcs == ETH_4_TCS ||
2370                                conf->nb_tcs == ETH_8_TCS)) {
2371                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2372                                                 " and nb_tcs != %d.",
2373                                                 ETH_4_TCS, ETH_8_TCS);
2374                                 return -EINVAL;
2375                         }
2376                 }
2377
2378                 /*
2379                  * When DCB/VT is off, maximum number of queues changes,
2380                  * except for 82598EB, which remains constant.
2381                  */
2382                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2383                                 hw->mac.type != ixgbe_mac_82598EB) {
2384                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2385                                 PMD_INIT_LOG(ERR,
2386                                              "Neither VT nor DCB are enabled, "
2387                                              "nb_tx_q > %d.",
2388                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2389                                 return -EINVAL;
2390                         }
2391                 }
2392         }
2393         return 0;
2394 }
2395
2396 static int
2397 ixgbe_dev_configure(struct rte_eth_dev *dev)
2398 {
2399         struct ixgbe_interrupt *intr =
2400                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2401         struct ixgbe_adapter *adapter = dev->data->dev_private;
2402         int ret;
2403
2404         PMD_INIT_FUNC_TRACE();
2405
2406         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
2407                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
2408
2409         /* multiple queue mode checking */
2410         ret  = ixgbe_check_mq_mode(dev);
2411         if (ret != 0) {
2412                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2413                             ret);
2414                 return ret;
2415         }
2416
2417         /* set flag to update link status after init */
2418         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2419
2420         /*
2421          * Initialize to TRUE. If any Rx queue fails to meet the bulk
2422          * allocation or vector Rx preconditions, we will reset it.
2423          */
2424         adapter->rx_bulk_alloc_allowed = true;
2425         adapter->rx_vec_allowed = true;
2426
2427         return 0;
2428 }
2429
2430 static void
2431 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2432 {
2433         struct ixgbe_hw *hw =
2434                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2435         struct ixgbe_interrupt *intr =
2436                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2437         uint32_t gpie;
2438
2439         /* only set it up on X550EM_X */
2440         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2441                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2442                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2443                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2444                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2445                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2446         }
2447 }
2448
2449 int
2450 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2451                         uint16_t tx_rate, uint64_t q_msk)
2452 {
2453         struct ixgbe_hw *hw;
2454         struct ixgbe_vf_info *vfinfo;
2455         struct rte_eth_link link;
2456         uint8_t  nb_q_per_pool;
2457         uint32_t queue_stride;
2458         uint32_t queue_idx, idx = 0, vf_idx;
2459         uint32_t queue_end;
2460         uint16_t total_rate = 0;
2461         struct rte_pci_device *pci_dev;
2462         int ret;
2463
2464         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2465         ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
2466         if (ret < 0)
2467                 return ret;
2468
2469         if (vf >= pci_dev->max_vfs)
2470                 return -EINVAL;
2471
2472         if (tx_rate > link.link_speed)
2473                 return -EINVAL;
2474
2475         if (q_msk == 0)
2476                 return 0;
2477
2478         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2479         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2480         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2481         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2482         queue_idx = vf * queue_stride;
2483         queue_end = queue_idx + nb_q_per_pool - 1;
2484         if (queue_end >= hw->mac.max_tx_queues)
2485                 return -EINVAL;
2486
2487         if (vfinfo) {
2488                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2489                         if (vf_idx == vf)
2490                                 continue;
2491                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2492                                 idx++)
2493                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2494                 }
2495         } else {
2496                 return -EINVAL;
2497         }
2498
2499         /* Store tx_rate for this vf. */
2500         for (idx = 0; idx < nb_q_per_pool; idx++) {
2501                 if (((uint64_t)0x1 << idx) & q_msk) {
2502                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2503                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2504                         total_rate += tx_rate;
2505                 }
2506         }
2507
2508         if (total_rate > dev->data->dev_link.link_speed) {
2509                 /* Reset the stored TX rate of the VF if the total would
2510                  * exceed the link speed.
2511                  */
2512                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2513                 return -EINVAL;
2514         }
2515
2516         /* Set RTTBCNRC of each queue/pool for VF X */
2517         for (; queue_idx <= queue_end; queue_idx++) {
2518                 if (0x1 & q_msk)
2519                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2520                 q_msk = q_msk >> 1;
2521         }
2522
2523         return 0;
2524 }
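
/*
 * Worked example (illustrative numbers): with 32 active pools,
 * queue_stride = IXGBE_MAX_RX_QUEUE_NUM / 32 = 4, so VF 5 owns queues
 * 20..23. A call with tx_rate = 1000 and q_msk = 0x3 then programs a
 * 1000 Mbps cap on queues 20 and 21 via ixgbe_set_queue_rate_limit()
 * and leaves queues 22 and 23 unlimited.
 */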
2525
2526 static int
2527 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw)
2528 {
2529         struct ixgbe_adapter *adapter = dev->data->dev_private;
2530         int err;
2531         uint32_t mflcn;
2532
2533         ixgbe_setup_fc(hw);
2534
2535         err = ixgbe_fc_enable(hw);
2536
2537         /* Not negotiated is not an error case */
2538         if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) {
2539                 /*
2540                  * Check if we want to forward MAC control frames; the driver
2541                  * doesn't have a native capability to do that, so we write
2542                  * the registers ourselves.
2543                  */
2544
2545                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2546
2547                 /* set or clear MFLCN.PMCF bit depending on configuration */
2548                 if (adapter->mac_ctrl_frame_fwd != 0)
2549                         mflcn |= IXGBE_MFLCN_PMCF;
2550                 else
2551                         mflcn &= ~IXGBE_MFLCN_PMCF;
2552
2553                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
2554                 IXGBE_WRITE_FLUSH(hw);
2555
2556                 return 0;
2557         }
2558         return err;
2559 }
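
/*
 * Usage sketch (hypothetical application code): the MFLCN.PMCF handling
 * above is driven by the generic flow control API, e.g. to have MAC
 * control frames forwarded to the host:
 *
 *     struct rte_eth_fc_conf fc_conf;
 *
 *     rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *     fc_conf.mac_ctrl_frame_fwd = 1;
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */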
2560
2561 /*
2562  * Configure device link speed and setup link.
2563  * It returns 0 on success.
2564  */
2565 static int
2566 ixgbe_dev_start(struct rte_eth_dev *dev)
2567 {
2568         struct ixgbe_hw *hw =
2569                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2570         struct ixgbe_vf_info *vfinfo =
2571                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2572         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2573         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2574         uint32_t intr_vector = 0;
2575         int err;
2576         bool link_up = false, negotiate = 0;
2577         uint32_t speed = 0;
2578         uint32_t allowed_speeds = 0;
2579         int mask = 0;
2580         int status;
2581         uint16_t vf, idx;
2582         uint32_t *link_speeds;
2583         struct ixgbe_tm_conf *tm_conf =
2584                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2585         struct ixgbe_macsec_setting *macsec_setting =
2586                 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
2587
2588         PMD_INIT_FUNC_TRACE();
2589
2590         /* Stop the link setup handler before resetting the HW. */
2591         ixgbe_dev_wait_setup_link_complete(dev, 0);
2592
2593         /* disable uio/vfio intr/eventfd mapping */
2594         rte_intr_disable(intr_handle);
2595
2596         /* stop adapter */
2597         hw->adapter_stopped = 0;
2598         ixgbe_stop_adapter(hw);
2599
2600         /* reinitialize adapter
2601          * this calls reset and start
2602          */
2603         status = ixgbe_pf_reset_hw(hw);
2604         if (status != 0)
2605                 return -1;
2606         hw->mac.ops.start_hw(hw);
2607         hw->mac.get_link_status = true;
2608
2609         /* configure PF module if SRIOV enabled */
2610         ixgbe_pf_host_configure(dev);
2611
2612         ixgbe_dev_phy_intr_setup(dev);
2613
2614         /* check and configure queue intr-vector mapping */
2615         if ((rte_intr_cap_multiple(intr_handle) ||
2616              !RTE_ETH_DEV_SRIOV(dev).active) &&
2617             dev->data->dev_conf.intr_conf.rxq != 0) {
2618                 intr_vector = dev->data->nb_rx_queues;
2619                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2620                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2621                                         IXGBE_MAX_INTR_QUEUE_NUM);
2622                         return -ENOTSUP;
2623                 }
2624                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2625                         return -1;
2626         }
2627
2628         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2629                 intr_handle->intr_vec =
2630                         rte_zmalloc("intr_vec",
2631                                     dev->data->nb_rx_queues * sizeof(int), 0);
2632                 if (intr_handle->intr_vec == NULL) {
2633                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2634                                      " intr_vec", dev->data->nb_rx_queues);
2635                         return -ENOMEM;
2636                 }
2637         }
2638
2639         /* configure MSI-X for sleeping until Rx interrupt */
2640         ixgbe_configure_msix(dev);
2641
2642         /* initialize transmission unit */
2643         ixgbe_dev_tx_init(dev);
2644
2645         /* This can fail when allocating mbufs for descriptor rings */
2646         err = ixgbe_dev_rx_init(dev);
2647         if (err) {
2648                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2649                 goto error;
2650         }
2651
2652         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2653                 ETH_VLAN_EXTEND_MASK;
2654         err = ixgbe_vlan_offload_config(dev, mask);
2655         if (err) {
2656                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2657                 goto error;
2658         }
2659
2660         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2661                 /* Enable vlan filtering for VMDq */
2662                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2663         }
2664
2665         /* Configure DCB hw */
2666         ixgbe_configure_dcb(dev);
2667
2668         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2669                 err = ixgbe_fdir_configure(dev);
2670                 if (err)
2671                         goto error;
2672         }
2673
2674         /* Restore vf rate limit */
2675         if (vfinfo != NULL) {
2676                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2677                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2678                                 if (vfinfo[vf].tx_rate[idx] != 0)
2679                                         ixgbe_set_vf_rate_limit(
2680                                                 dev, vf,
2681                                                 vfinfo[vf].tx_rate[idx],
2682                                                 1 << idx);
2683         }
2684
2685         ixgbe_restore_statistics_mapping(dev);
2686
2687         err = ixgbe_flow_ctrl_enable(dev, hw);
2688         if (err < 0) {
2689                 PMD_INIT_LOG(ERR, "enable flow ctrl err");
2690                 goto error;
2691         }
2692
2693         err = ixgbe_dev_rxtx_start(dev);
2694         if (err < 0) {
2695                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2696                 goto error;
2697         }
2698
2699         /* Skip link setup if loopback mode is enabled. */
2700         if (dev->data->dev_conf.lpbk_mode != 0) {
2701                 err = ixgbe_check_supported_loopback_mode(dev);
2702                 if (err < 0) {
2703                         PMD_INIT_LOG(ERR, "Unsupported loopback mode");
2704                         goto error;
2705                 } else {
2706                         goto skip_link_setup;
2707                 }
2708         }
2709
2710         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2711                 err = hw->mac.ops.setup_sfp(hw);
2712                 if (err)
2713                         goto error;
2714         }
2715
2716         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2717                 /* Turn on the copper */
2718                 ixgbe_set_phy_power(hw, true);
2719         } else {
2720                 /* Turn on the laser */
2721                 ixgbe_enable_tx_laser(hw);
2722         }
2723
2724         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2725         if (err)
2726                 goto error;
2727         dev->data->dev_link.link_status = link_up;
2728
2729         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2730         if (err)
2731                 goto error;
2732
2733         switch (hw->mac.type) {
2734         case ixgbe_mac_X550:
2735         case ixgbe_mac_X550EM_x:
2736         case ixgbe_mac_X550EM_a:
2737                 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2738                         ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
2739                         ETH_LINK_SPEED_10G;
2740                 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2741                                 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
2742                         allowed_speeds = ETH_LINK_SPEED_10M |
2743                                 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
2744                 break;
2745         default:
2746                 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2747                         ETH_LINK_SPEED_10G;
2748         }
2749
2750         link_speeds = &dev->data->dev_conf.link_speeds;
2751
2752         /* Ignore the autoneg flag bit and check the validity of
2753          * link_speeds.
2754          */
2755         if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
2756                 PMD_INIT_LOG(ERR, "Invalid link setting");
2757                 goto error;
2758         }
2759
2760         speed = 0x0;
2761         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2762                 switch (hw->mac.type) {
2763                 case ixgbe_mac_82598EB:
2764                         speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2765                         break;
2766                 case ixgbe_mac_82599EB:
2767                 case ixgbe_mac_X540:
2768                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2769                         break;
2770                 case ixgbe_mac_X550:
2771                 case ixgbe_mac_X550EM_x:
2772                 case ixgbe_mac_X550EM_a:
2773                         speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2774                         break;
2775                 default:
2776                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2777                 }
2778         } else {
2779                 if (*link_speeds & ETH_LINK_SPEED_10G)
2780                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2781                 if (*link_speeds & ETH_LINK_SPEED_5G)
2782                         speed |= IXGBE_LINK_SPEED_5GB_FULL;
2783                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
2784                         speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2785                 if (*link_speeds & ETH_LINK_SPEED_1G)
2786                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2787                 if (*link_speeds & ETH_LINK_SPEED_100M)
2788                         speed |= IXGBE_LINK_SPEED_100_FULL;
2789                 if (*link_speeds & ETH_LINK_SPEED_10M)
2790                         speed |= IXGBE_LINK_SPEED_10_FULL;
2791         }
2792
2793         err = ixgbe_setup_link(hw, speed, link_up);
2794         if (err)
2795                 goto error;
2796
2797 skip_link_setup:
2798
2799         if (rte_intr_allow_others(intr_handle)) {
2800                 /* check if lsc interrupt is enabled */
2801                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2802                         ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2803                 else
2804                         ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2805                 ixgbe_dev_macsec_interrupt_setup(dev);
2806         } else {
2807                 rte_intr_callback_unregister(intr_handle,
2808                                              ixgbe_dev_interrupt_handler, dev);
2809                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2810                         PMD_INIT_LOG(INFO, "LSC won't be enabled because"
2811                                      " interrupt multiplexing is unavailable");
2812         }
2813
2814         /* check if rxq interrupt is enabled */
2815         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2816             rte_intr_dp_is_en(intr_handle))
2817                 ixgbe_dev_rxq_interrupt_setup(dev);
2818
2819         /* enable uio/vfio intr/eventfd mapping */
2820         rte_intr_enable(intr_handle);
2821
2822         /* resume enabled intr since hw reset */
2823         ixgbe_enable_intr(dev);
2824         ixgbe_l2_tunnel_conf(dev);
2825         ixgbe_filter_restore(dev);
2826
2827         if (tm_conf->root && !tm_conf->committed)
2828                 PMD_DRV_LOG(WARNING,
2829                             "please call hierarchy_commit() "
2830                             "before starting the port");
2831
2832         /* wait for the controller to acquire link */
2833         err = ixgbe_wait_for_link_up(hw);
2834         if (err)
2835                 goto error;
2836
2837         /*
2838          * Update the link status right before returning, because it may
2839          * start the link configuration process in a separate thread.
2840          */
2841         ixgbe_dev_link_update(dev, 0);
2842
2843         /* setup the macsec setting register */
2844         if (macsec_setting->offload_en)
2845                 ixgbe_dev_macsec_register_enable(dev, macsec_setting);
2846
2847         return 0;
2848
2849 error:
2850         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2851         ixgbe_dev_clear_queues(dev);
2852         return -EIO;
2853 }
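
/*
 * Usage sketch (hypothetical application code): the speed mask built in
 * ixgbe_dev_start() comes from dev_conf.link_speeds; leaving it at
 * ETH_LINK_SPEED_AUTONEG picks the per-MAC autoneg mask instead.
 *
 *     struct rte_eth_conf port_conf = { 0 };
 *
 *     // advertise only 1G and 10G full duplex
 *     port_conf.link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *     rte_eth_dev_start(port_id);
 */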
2854
2855 /*
2856  * Stop device: disable rx and tx functions to allow for reconfiguring.
2857  */
2858 static void
2859 ixgbe_dev_stop(struct rte_eth_dev *dev)
2860 {
2861         struct rte_eth_link link;
2862         struct ixgbe_adapter *adapter = dev->data->dev_private;
2863         struct ixgbe_hw *hw =
2864                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2865         struct ixgbe_vf_info *vfinfo =
2866                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2867         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2868         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2869         int vf;
2870         struct ixgbe_tm_conf *tm_conf =
2871                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2872
2873         if (hw->adapter_stopped)
2874                 return;
2875
2876         PMD_INIT_FUNC_TRACE();
2877
2878         ixgbe_dev_wait_setup_link_complete(dev, 0);
2879
2880         /* disable interrupts */
2881         ixgbe_disable_intr(hw);
2882
2883         /* reset the NIC */
2884         ixgbe_pf_reset_hw(hw);
2885         hw->adapter_stopped = 0;
2886
2887         /* stop adapter */
2888         ixgbe_stop_adapter(hw);
2889
2890         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2891                 vfinfo[vf].clear_to_send = false;
2892
2893         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2894                 /* Turn off the copper */
2895                 ixgbe_set_phy_power(hw, false);
2896         } else {
2897                 /* Turn off the laser */
2898                 ixgbe_disable_tx_laser(hw);
2899         }
2900
2901         ixgbe_dev_clear_queues(dev);
2902
2903         /* Clear stored conf */
2904         dev->data->scattered_rx = 0;
2905         dev->data->lro = 0;
2906
2907         /* Clear recorded link status */
2908         memset(&link, 0, sizeof(link));
2909         rte_eth_linkstatus_set(dev, &link);
2910
2911         if (!rte_intr_allow_others(intr_handle))
2912                 /* revert to the default handler */
2913                 rte_intr_callback_register(intr_handle,
2914                                            ixgbe_dev_interrupt_handler,
2915                                            (void *)dev);
2916
2917         /* Clean datapath event and queue/vec mapping */
2918         rte_intr_efd_disable(intr_handle);
2919         if (intr_handle->intr_vec != NULL) {
2920                 rte_free(intr_handle->intr_vec);
2921                 intr_handle->intr_vec = NULL;
2922         }
2923
2924         /* reset hierarchy commit */
2925         tm_conf->committed = false;
2926
2927         adapter->rss_reta_updated = 0;
2928
2929         hw->adapter_stopped = true;
2930 }
2931
2932 /*
2933  * Set device link up: enable tx.
2934  */
2935 static int
2936 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2937 {
2938         struct ixgbe_hw *hw =
2939                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2940         if (hw->mac.type == ixgbe_mac_82599EB) {
2941 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2942                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2943                         /* Not supported in bypass mode */
2944                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2945                                      "by device id 0x%x", hw->device_id);
2946                         return -ENOTSUP;
2947                 }
2948 #endif
2949         }
2950
2951         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2952                 /* Turn on the copper */
2953                 ixgbe_set_phy_power(hw, true);
2954         } else {
2955                 /* Turn on the laser */
2956                 ixgbe_enable_tx_laser(hw);
2957                 ixgbe_dev_link_update(dev, 0);
2958         }
2959
2960         return 0;
2961 }
2962
2963 /*
2964  * Set device link down: disable tx.
2965  */
2966 static int
2967 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2968 {
2969         struct ixgbe_hw *hw =
2970                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2971         if (hw->mac.type == ixgbe_mac_82599EB) {
2972 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2973                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2974                         /* Not supported in bypass mode */
2975                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2976                                      "by device id 0x%x", hw->device_id);
2977                         return -ENOTSUP;
2978                 }
2979 #endif
2980         }
2981
2982         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2983                 /* Turn off the copper */
2984                 ixgbe_set_phy_power(hw, false);
2985         } else {
2986                 /* Turn off the laser */
2987                 ixgbe_disable_tx_laser(hw);
2988                 ixgbe_dev_link_update(dev, 0);
2989         }
2990
2991         return 0;
2992 }
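
/*
 * Usage sketch (hypothetical application code): the two handlers above
 * back the generic link toggling API.
 *
 *     rte_eth_dev_set_link_down(port_id);  // laser/PHY off, Tx stops
 *     rte_eth_dev_set_link_up(port_id);    // laser/PHY back on
 */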
2993
2994 /*
2995  * Reset and stop device.
2996  */
2997 static void
2998 ixgbe_dev_close(struct rte_eth_dev *dev)
2999 {
3000         struct ixgbe_hw *hw =
3001                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3002         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3003         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3004         int retries = 0;
3005         int ret;
3006
3007         PMD_INIT_FUNC_TRACE();
3008
3009         ixgbe_pf_reset_hw(hw);
3010
3011         ixgbe_dev_stop(dev);
3012
3013         ixgbe_dev_free_queues(dev);
3014
3015         ixgbe_disable_pcie_master(hw);
3016
3017         /* reprogram the RAR[0] in case user changed it. */
3018         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3019
3020         dev->dev_ops = NULL;
3021         dev->rx_pkt_burst = NULL;
3022         dev->tx_pkt_burst = NULL;
3023
3024         /* Unlock any pending hardware semaphore */
3025         ixgbe_swfw_lock_reset(hw);
3026
3027         /* disable uio intr before callback unregister */
3028         rte_intr_disable(intr_handle);
3029
3030         do {
3031                 ret = rte_intr_callback_unregister(intr_handle,
3032                                 ixgbe_dev_interrupt_handler, dev);
3033                 if (ret >= 0 || ret == -ENOENT) {
3034                         break;
3035                 } else if (ret != -EAGAIN) {
3036                         PMD_INIT_LOG(ERR,
3037                                 "intr callback unregister failed: %d",
3038                                 ret);
3039                 }
3040                 rte_delay_ms(100);
3041         } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
3042
3043         /* cancel the delayed interrupt handler before removing the device */
3044         rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev);
3045
3046         /* uninitialize the PF if max_vfs is not zero */
3047         ixgbe_pf_host_uninit(dev);
3048
3049         /* remove all the fdir filters & hash */
3050         ixgbe_fdir_filter_uninit(dev);
3051
3052         /* remove all the L2 tunnel filters & hash */
3053         ixgbe_l2_tn_filter_uninit(dev);
3054
3055         /* Remove all ntuple filters of the device */
3056         ixgbe_ntuple_filter_uninit(dev);
3057
3058         /* clear all the filters list */
3059         ixgbe_filterlist_flush();
3060
3061         /* Remove all Traffic Manager configuration */
3062         ixgbe_tm_conf_uninit(dev);
3063
3064 #ifdef RTE_LIBRTE_SECURITY
3065         rte_free(dev->security_ctx);
3066 #endif
3067
3068 }
3069
3070 /*
3071  * Reset PF device.
3072  */
3073 static int
3074 ixgbe_dev_reset(struct rte_eth_dev *dev)
3075 {
3076         int ret;
3077
3078         /* When a DPDK PMD PF begins to reset the PF port, it should notify
3079          * all its VFs to make them align with it. The detailed notification
3080          * mechanism is PMD specific. As to ixgbe PF, it is rather complex.
3081          * To avoid unexpected behavior in VF, currently reset of PF with
3082          * SR-IOV activation is not supported. It might be supported later.
3083          */
3084         if (dev->data->sriov.active)
3085                 return -ENOTSUP;
3086
3087         ret = eth_ixgbe_dev_uninit(dev);
3088         if (ret)
3089                 return ret;
3090
3091         ret = eth_ixgbe_dev_init(dev, NULL);
3092
3093         return ret;
3094 }
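
/*
 * Usage sketch (hypothetical application code): after a successful
 * rte_eth_dev_reset() the port is uninitialized again, so the usual
 * configure/queue-setup/start sequence must be replayed.
 *
 *     if (rte_eth_dev_reset(port_id) == 0) {
 *             rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *             // ... rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() ...
 *             rte_eth_dev_start(port_id);
 *     }
 */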
3095
3096 static void
3097 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
3098                            struct ixgbe_hw_stats *hw_stats,
3099                            struct ixgbe_macsec_stats *macsec_stats,
3100                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
3101                            uint64_t *total_qprc, uint64_t *total_qprdc)
3102 {
3103         uint32_t bprc, lxon, lxoff, total;
3104         uint32_t delta_gprc = 0;
3105         unsigned i;
3106         /* Workaround for RX byte counts not including CRC bytes when CRC
3107          * stripping is enabled. CRC bytes are subtracted from the counters
3108          * here when crc_strip is disabled.
3109          */
3110         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
3111                         IXGBE_HLREG0_RXCRCSTRP);
3112
3113         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3114         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3115         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3116         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3117
3118         for (i = 0; i < 8; i++) {
3119                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3120
3121                 /* global total per queue */
3122                 hw_stats->mpc[i] += mp;
3123                 /* Running comprehensive total for stats display */
3124                 *total_missed_rx += hw_stats->mpc[i];
3125                 if (hw->mac.type == ixgbe_mac_82598EB) {
3126                         hw_stats->rnbc[i] +=
3127                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3128                         hw_stats->pxonrxc[i] +=
3129                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
3130                         hw_stats->pxoffrxc[i] +=
3131                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
3132                 } else {
3133                         hw_stats->pxonrxc[i] +=
3134                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
3135                         hw_stats->pxoffrxc[i] +=
3136                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
3137                         hw_stats->pxon2offc[i] +=
3138                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
3139                 }
3140                 hw_stats->pxontxc[i] +=
3141                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
3142                 hw_stats->pxofftxc[i] +=
3143                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
3144         }
3145         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3146                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3147                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3148                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3149
3150                 delta_gprc += delta_qprc;
3151
3152                 hw_stats->qprc[i] += delta_qprc;
3153                 hw_stats->qptc[i] += delta_qptc;
3154
3155                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
3156                 hw_stats->qbrc[i] +=
3157                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
3158                 if (crc_strip == 0)
3159                         hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
3160
3161                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
3162                 hw_stats->qbtc[i] +=
3163                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
3164
3165                 hw_stats->qprdc[i] += delta_qprdc;
3166                 *total_qprdc += hw_stats->qprdc[i];
3167
3168                 *total_qprc += hw_stats->qprc[i];
3169                 *total_qbrc += hw_stats->qbrc[i];
3170         }
3171         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3172         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3173         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3174
3175         /*
3176          * An errata states that gprc actually counts good + missed packets.
3177          * As a workaround, set gprc to the sum of the per-queue packet receives.
3178          */
3179         hw_stats->gprc = *total_qprc;
3180
3181         if (hw->mac.type != ixgbe_mac_82598EB) {
3182                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3183                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3184                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3185                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3186                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3187                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3188                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3189                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3190         } else {
3191                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3192                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3193                 /* 82598 only has a counter in the high register */
3194                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3195                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3196                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3197         }
3198         uint64_t old_tpr = hw_stats->tpr;
3199
3200         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3201         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3202
3203         if (crc_strip == 0)
3204                 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
3205
3206         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3207         hw_stats->gptc += delta_gptc;
3208         hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
3209         hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
3210
3211         /*
3212          * Workaround: mprc hardware is incorrectly counting
3213          * broadcasts, so for now we subtract those.
3214          */
3215         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3216         hw_stats->bprc += bprc;
3217         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3218         if (hw->mac.type == ixgbe_mac_82598EB)
3219                 hw_stats->mprc -= bprc;
3220
3221         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3222         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3223         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3224         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3225         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3226         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3227
3228         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3229         hw_stats->lxontxc += lxon;
3230         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3231         hw_stats->lxofftxc += lxoff;
3232         total = lxon + lxoff;
3233
3234         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3235         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3236         hw_stats->gptc -= total;
3237         hw_stats->mptc -= total;
3238         hw_stats->ptc64 -= total;
3239         hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
3240
3241         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3242         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3243         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3244         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3245         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3246         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3247         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3248         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3249         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3250         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3251         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3252         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3253         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3254         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3255         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3256         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3257         /* FCoE counters are not present on 82598EB */
3258         if (hw->mac.type != ixgbe_mac_82598EB) {
3259                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3260                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3261                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3262                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3263                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3264         }
3265
3266         /* Flow Director Stats registers */
3267         if (hw->mac.type != ixgbe_mac_82598EB) {
3268                 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3269                 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3270                 hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
3271                                         IXGBE_FDIRUSTAT) & 0xFFFF;
3272                 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
3273                                         IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
3274                 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
3275                                         IXGBE_FDIRFSTAT) & 0xFFFF;
3276                 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
3277                                         IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
3278         }
3279         /* MACsec Stats registers */
3280         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3281         macsec_stats->out_pkts_encrypted +=
3282                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3283         macsec_stats->out_pkts_protected +=
3284                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3285         macsec_stats->out_octets_encrypted +=
3286                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3287         macsec_stats->out_octets_protected +=
3288                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3289         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3290         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3291         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3292         macsec_stats->in_pkts_unknownsci +=
3293                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3294         macsec_stats->in_octets_decrypted +=
3295                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3296         macsec_stats->in_octets_validated +=
3297                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3298         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3299         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3300         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3301         for (i = 0; i < 2; i++) {
3302                 macsec_stats->in_pkts_ok +=
3303                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3304                 macsec_stats->in_pkts_invalid +=
3305                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3306                 macsec_stats->in_pkts_notvalid +=
3307                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3308         }
3309         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3310         macsec_stats->in_pkts_notusingsa +=
3311                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3312 }
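
/*
 * Worked example: the per-queue byte counters are split across two
 * 32-bit registers, so one 64-bit sample is assembled as
 *
 *     qbrc = QBRC_L(i) | ((uint64_t)QBRC_H(i) << 32);
 *
 * e.g. QBRC_H = 0x2 and QBRC_L = 0x10 yield 0x200000010 (8589934608)
 * bytes, which the loop above accumulates into hw_stats->qbrc[i].
 */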
3313
3314 /*
3315  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3316  */
3317 static int
3318 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3319 {
3320         struct ixgbe_hw *hw =
3321                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3322         struct ixgbe_hw_stats *hw_stats =
3323                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3324         struct ixgbe_macsec_stats *macsec_stats =
3325                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3326                                 dev->data->dev_private);
3327         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3328         unsigned i;
3329
3330         total_missed_rx = 0;
3331         total_qbrc = 0;
3332         total_qprc = 0;
3333         total_qprdc = 0;
3334
3335         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3336                         &total_qbrc, &total_qprc, &total_qprdc);
3337
3338         if (stats == NULL)
3339                 return -EINVAL;
3340
3341         /* Fill out the rte_eth_stats statistics structure */
3342         stats->ipackets = total_qprc;
3343         stats->ibytes = total_qbrc;
3344         stats->opackets = hw_stats->gptc;
3345         stats->obytes = hw_stats->gotc;
3346
3347         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3348                 stats->q_ipackets[i] = hw_stats->qprc[i];
3349                 stats->q_opackets[i] = hw_stats->qptc[i];
3350                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3351                 stats->q_obytes[i] = hw_stats->qbtc[i];
3352                 stats->q_errors[i] = hw_stats->qprdc[i];
3353         }
3354
3355         /* Rx Errors */
3356         stats->imissed  = total_missed_rx;
3357         stats->ierrors  = hw_stats->crcerrs +
3358                           hw_stats->mspdc +
3359                           hw_stats->rlec +
3360                           hw_stats->ruc +
3361                           hw_stats->roc +
3362                           hw_stats->illerrc +
3363                           hw_stats->errbc +
3364                           hw_stats->rfc +
3365                           hw_stats->fccrc +
3366                           hw_stats->fclast;
3367
3368         /* Tx Errors */
3369         stats->oerrors  = 0;
3370         return 0;
3371 }
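
/*
 * Usage sketch (hypothetical application code): the totals computed
 * above surface through the generic stats API.
 *
 *     struct rte_eth_stats stats;
 *
 *     if (rte_eth_stats_get(port_id, &stats) == 0)
 *             printf("rx %" PRIu64 " pkts, %" PRIu64 " missed\n",
 *                    stats.ipackets, stats.imissed);
 */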
3372
3373 static int
3374 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3375 {
3376         struct ixgbe_hw_stats *stats =
3377                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3378
3379         /* HW registers are cleared on read */
3380         ixgbe_dev_stats_get(dev, NULL);
3381
3382         /* Reset software totals */
3383         memset(stats, 0, sizeof(*stats));
3384
3385         return 0;
3386 }
3387
3388 /* This function calculates the number of xstats based on the current config */
3389 static unsigned
3390 ixgbe_xstats_calc_num(void) {
3391         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3392                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3393                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3394 }
3395
3396 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3397         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3398 {
3399         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3400         unsigned stat, i, count;
3401
3402         if (xstats_names != NULL) {
3403                 count = 0;
3404
3405                 /* Note: limit >= cnt_stats checked upstream
3406                  * in rte_eth_xstats_names()
3407                  */
3408
3409                 /* Extended stats from ixgbe_hw_stats */
3410                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3411                         strlcpy(xstats_names[count].name,
3412                                 rte_ixgbe_stats_strings[i].name,
3413                                 sizeof(xstats_names[count].name));
3414                         count++;
3415                 }
3416
3417                 /* MACsec Stats */
3418                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3419                         strlcpy(xstats_names[count].name,
3420                                 rte_ixgbe_macsec_strings[i].name,
3421                                 sizeof(xstats_names[count].name));
3422                         count++;
3423                 }
3424
3425                 /* RX Priority Stats */
3426                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3427                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3428                                 snprintf(xstats_names[count].name,
3429                                         sizeof(xstats_names[count].name),
3430                                         "rx_priority%u_%s", i,
3431                                         rte_ixgbe_rxq_strings[stat].name);
3432                                 count++;
3433                         }
3434                 }
3435
3436                 /* TX Priority Stats */
3437                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3438                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3439                                 snprintf(xstats_names[count].name,
3440                                         sizeof(xstats_names[count].name),
3441                                         "tx_priority%u_%s", i,
3442                                         rte_ixgbe_txq_strings[stat].name);
3443                                 count++;
3444                         }
3445                 }
3446         }
3447         return cnt_stats;
3448 }
3449
3450 static int ixgbe_dev_xstats_get_names_by_id(
3451         struct rte_eth_dev *dev,
3452         struct rte_eth_xstat_name *xstats_names,
3453         const uint64_t *ids,
3454         unsigned int limit)
3455 {
3456         if (!ids) {
3457                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3458                 unsigned int stat, i, count;
3459
3460                 if (xstats_names != NULL) {
3461                         count = 0;
3462
3463                         /* Note: limit >= cnt_stats checked upstream
3464                          * in rte_eth_xstats_names()
3465                          */
3466
3467                         /* Extended stats from ixgbe_hw_stats */
3468                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3469                                 strlcpy(xstats_names[count].name,
3470                                         rte_ixgbe_stats_strings[i].name,
3471                                         sizeof(xstats_names[count].name));
3472                                 count++;
3473                         }
3474
3475                         /* MACsec Stats */
3476                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3477                                 strlcpy(xstats_names[count].name,
3478                                         rte_ixgbe_macsec_strings[i].name,
3479                                         sizeof(xstats_names[count].name));
3480                                 count++;
3481                         }
3482
3483                         /* RX Priority Stats */
3484                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3485                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3486                                         snprintf(xstats_names[count].name,
3487                                             sizeof(xstats_names[count].name),
3488                                             "rx_priority%u_%s", i,
3489                                             rte_ixgbe_rxq_strings[stat].name);
3490                                         count++;
3491                                 }
3492                         }
3493
3494                         /* TX Priority Stats */
3495                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3496                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3497                                         snprintf(xstats_names[count].name,
3498                                             sizeof(xstats_names[count].name),
3499                                             "tx_priority%u_%s", i,
3500                                             rte_ixgbe_txq_strings[stat].name);
3501                                         count++;
3502                                 }
3503                         }
3504                 }
3505                 return cnt_stats;
3506         }
3507
3508         uint16_t i;
3509         uint16_t size = ixgbe_xstats_calc_num();
3510         struct rte_eth_xstat_name xstats_names_copy[size];
3511
3512         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3513                         size);
3514
3515         for (i = 0; i < limit; i++) {
3516                 if (ids[i] >= size) {
3517                         PMD_INIT_LOG(ERR, "id value isn't valid");
3518                         return -1;
3519                 }
3520                 strcpy(xstats_names[i].name,
3521                                 xstats_names_copy[ids[i]].name);
3522         }
3523         return limit;
3524 }
3525
3526 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3527         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3528 {
3529         unsigned i;
3530
3531         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3532                 return -ENOMEM;
3533
3534         if (xstats_names != NULL)
3535                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3536                         strlcpy(xstats_names[i].name,
3537                                 rte_ixgbevf_stats_strings[i].name,
3538                                 sizeof(xstats_names[i].name));
3539         return IXGBEVF_NB_XSTATS;
3540 }
3541
3542 static int
3543 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3544                                          unsigned n)
3545 {
3546         struct ixgbe_hw *hw =
3547                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3548         struct ixgbe_hw_stats *hw_stats =
3549                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3550         struct ixgbe_macsec_stats *macsec_stats =
3551                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3552                                 dev->data->dev_private);
3553         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3554         unsigned i, stat, count = 0;
3555
3556         count = ixgbe_xstats_calc_num();
3557
3558         if (n < count)
3559                 return count;
3560
3561         total_missed_rx = 0;
3562         total_qbrc = 0;
3563         total_qprc = 0;
3564         total_qprdc = 0;
3565
3566         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3567                         &total_qbrc, &total_qprc, &total_qprdc);
3568
3569         /* If this is a reset, xstats is NULL, and we have cleared the
3570          * registers by reading them.
3571          */
3572         if (!xstats)
3573                 return 0;
3574
3575         /* Extended stats from ixgbe_hw_stats */
3576         count = 0;
3577         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3578                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3579                                 rte_ixgbe_stats_strings[i].offset);
3580                 xstats[count].id = count;
3581                 count++;
3582         }
3583
3584         /* MACsec Stats */
3585         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3586                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3587                                 rte_ixgbe_macsec_strings[i].offset);
3588                 xstats[count].id = count;
3589                 count++;
3590         }
3591
3592         /* RX Priority Stats */
3593         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3594                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3595                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3596                                         rte_ixgbe_rxq_strings[stat].offset +
3597                                         (sizeof(uint64_t) * i));
3598                         xstats[count].id = count;
3599                         count++;
3600                 }
3601         }
3602
3603         /* TX Priority Stats */
3604         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3605                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3606                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3607                                         rte_ixgbe_txq_strings[stat].offset +
3608                                         (sizeof(uint64_t) * i));
3609                         xstats[count].id = count;
3610                         count++;
3611                 }
3612         }
3613         return count;
3614 }
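
/*
 * Usage sketch (hypothetical application code): because the function
 * above returns the required count when n is too small, the usual
 * two-call pattern works.
 *
 *     int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *xs = malloc(nb * sizeof(*xs));
 *
 *     if (xs != NULL)
 *             rte_eth_xstats_get(port_id, xs, nb);
 *     // xs[i].id indexes the names from rte_eth_xstats_get_names()
 *     free(xs);
 */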
3615
3616 static int
3617 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3618                 uint64_t *values, unsigned int n)
3619 {
3620         if (!ids) {
3621                 struct ixgbe_hw *hw =
3622                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3623                 struct ixgbe_hw_stats *hw_stats =
3624                                 IXGBE_DEV_PRIVATE_TO_STATS(
3625                                                 dev->data->dev_private);
3626                 struct ixgbe_macsec_stats *macsec_stats =
3627                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3628                                         dev->data->dev_private);
3629                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3630                 unsigned int i, stat, count = 0;
3631
3632                 count = ixgbe_xstats_calc_num();
3633
3634                 if (n < count)
3635                         return count;
3636
3637                 total_missed_rx = 0;
3638                 total_qbrc = 0;
3639                 total_qprc = 0;
3640                 total_qprdc = 0;
3641
3642                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3643                                 &total_missed_rx, &total_qbrc, &total_qprc,
3644                                 &total_qprdc);
3645
3646                 /* If this is a reset, values is NULL, and we have cleared the
3647                  * registers by reading them.
3648                  */
3649                 if (!values)
3650                         return 0;
3651
3652                 /* Extended stats from ixgbe_hw_stats */
3653                 count = 0;
3654                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3655                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3656                                         rte_ixgbe_stats_strings[i].offset);
3657                         count++;
3658                 }
3659
3660                 /* MACsec Stats */
3661                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3662                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3663                                         rte_ixgbe_macsec_strings[i].offset);
3664                         count++;
3665                 }
3666
3667                 /* RX Priority Stats */
3668                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3669                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3670                                 values[count] =
3671                                         *(uint64_t *)(((char *)hw_stats) +
3672                                         rte_ixgbe_rxq_strings[stat].offset +
3673                                         (sizeof(uint64_t) * i));
3674                                 count++;
3675                         }
3676                 }
3677
3678                 /* TX Priority Stats */
3679                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3680                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3681                                 values[count] =
3682                                         *(uint64_t *)(((char *)hw_stats) +
3683                                         rte_ixgbe_txq_strings[stat].offset +
3684                                         (sizeof(uint64_t) * i));
3685                                 count++;
3686                         }
3687                 }
3688                 return count;
3689         }
3690
3691         uint16_t i;
3692         uint16_t size = ixgbe_xstats_calc_num();
3693         uint64_t values_copy[size];
3694
3695         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3696
3697         for (i = 0; i < n; i++) {
3698                 if (ids[i] >= size) {
3699                         PMD_INIT_LOG(ERR, "id value isn't valid");
3700                         return -1;
3701                 }
3702                 values[i] = values_copy[ids[i]];
3703         }
3704         return n;
3705 }
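
/*
 * Usage sketch (hypothetical application code): fetching a subset of
 * counters by id, as handled in the tail of the function above.
 *
 *     uint64_t ids[] = { 0, 5 };  // indexes into the xstats name table
 *     uint64_t values[RTE_DIM(ids)];
 *
 *     rte_eth_xstats_get_by_id(port_id, ids, values, RTE_DIM(ids));
 */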
3706
3707 static int
3708 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3709 {
3710         struct ixgbe_hw_stats *stats =
3711                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3712         struct ixgbe_macsec_stats *macsec_stats =
3713                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3714                                 dev->data->dev_private);
3715
3716         unsigned count = ixgbe_xstats_calc_num();
3717
3718         /* HW registers are cleared on read */
3719         ixgbe_dev_xstats_get(dev, NULL, count);
3720
3721         /* Reset software totals */
3722         memset(stats, 0, sizeof(*stats));
3723         memset(macsec_stats, 0, sizeof(*macsec_stats));
3724
3725         return 0;
3726 }
3727
3728 static void
3729 ixgbevf_update_stats(struct rte_eth_dev *dev)
3730 {
3731         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3732         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3733                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3734
3735         /* Good Rx packet, include VF loopback */
3736         UPDATE_VF_STAT(IXGBE_VFGPRC,
3737             hw_stats->last_vfgprc, hw_stats->vfgprc);
3738
3739         /* Good Rx octets, include VF loopback */
3740         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3741             hw_stats->last_vfgorc, hw_stats->vfgorc);
3742
3743         /* Good Tx packet, include VF loopback */
3744         UPDATE_VF_STAT(IXGBE_VFGPTC,
3745             hw_stats->last_vfgptc, hw_stats->vfgptc);
3746
3747         /* Good Tx octets, include VF loopback */
3748         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3749             hw_stats->last_vfgotc, hw_stats->vfgotc);
3750
3751         /* Rx Multicast Packets */
3752         UPDATE_VF_STAT(IXGBE_VFMPRC,
3753             hw_stats->last_vfmprc, hw_stats->vfmprc);
3754 }
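
/*
 * Note (based on the helper macros in ixgbe_ethdev.h): UPDATE_VF_STAT()
 * and UPDATE_VF_STAT_36BIT() accumulate the delta between the previous
 * and the current register sample into the running software total, so
 * the narrow hardware counters can wrap without losing counts.
 */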
3755
3756 static int
3757 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3758                        unsigned n)
3759 {
3760         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3761                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3762         unsigned i;
3763
3764         if (n < IXGBEVF_NB_XSTATS)
3765                 return IXGBEVF_NB_XSTATS;
3766
3767         ixgbevf_update_stats(dev);
3768
3769         if (!xstats)
3770                 return 0;
3771
3772         /* Extended stats */
3773         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3774                 xstats[i].id = i;
3775                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3776                         rte_ixgbevf_stats_strings[i].offset);
3777         }
3778
3779         return IXGBEVF_NB_XSTATS;
3780 }
3781
3782 static int
3783 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3784 {
3785         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3786                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3787
3788         ixgbevf_update_stats(dev);
3789
3790         if (stats == NULL)
3791                 return -EINVAL;
3792
3793         stats->ipackets = hw_stats->vfgprc;
3794         stats->ibytes = hw_stats->vfgorc;
3795         stats->opackets = hw_stats->vfgptc;
3796         stats->obytes = hw_stats->vfgotc;
3797         return 0;
3798 }
3799
3800 static int
3801 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3802 {
3803         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3804                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3805
3806         /* Sync HW register to the last stats */
3807         ixgbevf_dev_stats_get(dev, NULL);
3808
3809         /* reset HW current stats */
3810         hw_stats->vfgprc = 0;
3811         hw_stats->vfgorc = 0;
3812         hw_stats->vfgptc = 0;
3813         hw_stats->vfgotc = 0;
3814
3815         return 0;
3816 }
3817
3818 static int
3819 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3820 {
3821         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3822         u16 eeprom_verh, eeprom_verl;
3823         u32 etrack_id;
3824         int ret;
3825
3826         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3827         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3828
3829         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3830         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3831
3832         ret += 1; /* add the size of '\0' */
3833         if (fw_size < (u32)ret)
3834                 return ret;
3835         else
3836                 return 0;
3837 }
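/*
 * Illustrative sketch (not part of the driver): per the fw_version_get
 * contract, a positive return value is the buffer size the driver needed
 * (here, the etrack id string plus its terminating '\0'). `port_id` is an
 * assumed port:
 *
 *     char ver[16];
 *     int rc = rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver));
 *     if (rc == 0)
 *             printf("fw: %s\n", ver);        // e.g. "0x80000838"
 *     else if (rc > 0)
 *             printf("need %d bytes\n", rc);  // buffer was too small
 */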
3838
3839 static int
3840 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3841 {
3842         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3843         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3844         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3845
3846         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3847         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3848         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3849                 /*
3850                  * When DCB/VT is off, maximum number of queues changes,
3851                  * except for 82598EB, which remains constant.
3852                  */
3853                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3854                                 hw->mac.type != ixgbe_mac_82598EB)
3855                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3856         }
3857         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3858         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3859         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3860         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3861         dev_info->max_vfs = pci_dev->max_vfs;
3862         if (hw->mac.type == ixgbe_mac_82598EB)
3863                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3864         else
3865                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3866         dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
3867         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3868         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3869         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3870         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3871                                      dev_info->rx_queue_offload_capa);
3872         dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3873         dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3874
3875         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3876                 .rx_thresh = {
3877                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3878                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3879                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3880                 },
3881                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3882                 .rx_drop_en = 0,
3883                 .offloads = 0,
3884         };
3885
3886         dev_info->default_txconf = (struct rte_eth_txconf) {
3887                 .tx_thresh = {
3888                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3889                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3890                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3891                 },
3892                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3893                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3894                 .offloads = 0,
3895         };
3896
3897         dev_info->rx_desc_lim = rx_desc_lim;
3898         dev_info->tx_desc_lim = tx_desc_lim;
3899
3900         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3901         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3902         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3903
3904         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3905         if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
3906                         hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
3907                 dev_info->speed_capa = ETH_LINK_SPEED_10M |
3908                         ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
3909
3910         if (hw->mac.type == ixgbe_mac_X540 ||
3911             hw->mac.type == ixgbe_mac_X540_vf ||
3912             hw->mac.type == ixgbe_mac_X550 ||
3913             hw->mac.type == ixgbe_mac_X550_vf) {
3914                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3915         }
3916         if (hw->mac.type == ixgbe_mac_X550) {
3917                 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3918                 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3919         }
3920
3921         /* Driver-preferred Rx/Tx parameters */
3922         dev_info->default_rxportconf.burst_size = 32;
3923         dev_info->default_txportconf.burst_size = 32;
3924         dev_info->default_rxportconf.nb_queues = 1;
3925         dev_info->default_txportconf.nb_queues = 1;
3926         dev_info->default_rxportconf.ring_size = 256;
3927         dev_info->default_txportconf.ring_size = 256;
3928
3929         return 0;
3930 }
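/*
 * Illustrative sketch (not part of the driver): applications typically
 * feed the defaults reported above straight back into queue setup.
 * `port_id` and the pre-created mbuf pool `mb_pool` are assumptions:
 *
 *     struct rte_eth_dev_info info;
 *     if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *             struct rte_eth_rxconf rxq_conf = info.default_rxconf;
 *             rte_eth_rx_queue_setup(port_id, 0, 256,
 *                                    rte_eth_dev_socket_id(port_id),
 *                                    &rxq_conf, mb_pool);
 *     }
 */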
3931
3932 static const uint32_t *
3933 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3934 {
3935         static const uint32_t ptypes[] = {
3936                 /* For non-vec functions,
3937                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3938                  * for vec functions,
3939                  * refers to _recv_raw_pkts_vec().
3940                  */
3941                 RTE_PTYPE_L2_ETHER,
3942                 RTE_PTYPE_L3_IPV4,
3943                 RTE_PTYPE_L3_IPV4_EXT,
3944                 RTE_PTYPE_L3_IPV6,
3945                 RTE_PTYPE_L3_IPV6_EXT,
3946                 RTE_PTYPE_L4_SCTP,
3947                 RTE_PTYPE_L4_TCP,
3948                 RTE_PTYPE_L4_UDP,
3949                 RTE_PTYPE_TUNNEL_IP,
3950                 RTE_PTYPE_INNER_L3_IPV6,
3951                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3952                 RTE_PTYPE_INNER_L4_TCP,
3953                 RTE_PTYPE_INNER_L4_UDP,
3954                 RTE_PTYPE_UNKNOWN
3955         };
3956
3957         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3958             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3959             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3960             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3961                 return ptypes;
3962
3963 #if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON)
3964         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3965             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3966                 return ptypes;
3967 #endif
3968         return NULL;
3969 }
3970
3971 static int
3972 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3973                      struct rte_eth_dev_info *dev_info)
3974 {
3975         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3976         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3977
3978         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3979         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3980         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3981         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3982         dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
3983         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3984         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3985         dev_info->max_vfs = pci_dev->max_vfs;
3986         if (hw->mac.type == ixgbe_mac_82598EB)
3987                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3988         else
3989                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3990         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3991         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3992                                      dev_info->rx_queue_offload_capa);
3993         dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3994         dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3995         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3996         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3997         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3998
3999         dev_info->default_rxconf = (struct rte_eth_rxconf) {
4000                 .rx_thresh = {
4001                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
4002                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
4003                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
4004                 },
4005                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
4006                 .rx_drop_en = 0,
4007                 .offloads = 0,
4008         };
4009
4010         dev_info->default_txconf = (struct rte_eth_txconf) {
4011                 .tx_thresh = {
4012                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
4013                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
4014                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
4015                 },
4016                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
4017                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
4018                 .offloads = 0,
4019         };
4020
4021         dev_info->rx_desc_lim = rx_desc_lim;
4022         dev_info->tx_desc_lim = tx_desc_lim;
4023
4024         return 0;
4025 }
4026
4027 static int
4028 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4029                    bool *link_up, int wait_to_complete)
4030 {
4031         struct ixgbe_adapter *adapter = container_of(hw,
4032                                                      struct ixgbe_adapter, hw);
4033         struct ixgbe_mbx_info *mbx = &hw->mbx;
4034         struct ixgbe_mac_info *mac = &hw->mac;
4035         uint32_t links_reg, in_msg;
4036         int ret_val = 0;
4037
4038         /* If we were hit with a reset, drop the link */
4039         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
4040                 mac->get_link_status = true;
4041
4042         if (!mac->get_link_status)
4043                 goto out;
4044
4045         /* if link status is down, no point in checking whether the PF is up */
4046         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
4047         if (!(links_reg & IXGBE_LINKS_UP))
4048                 goto out;
4049
4050         /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
4051          * before the link status is correct
4052          */
4053         if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
4054                 int i;
4055
4056                 for (i = 0; i < 5; i++) {
4057                         rte_delay_us(100);
4058                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
4059
4060                         if (!(links_reg & IXGBE_LINKS_UP))
4061                                 goto out;
4062                 }
4063         }
4064
4065         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4066         case IXGBE_LINKS_SPEED_10G_82599:
4067                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4068                 if (hw->mac.type >= ixgbe_mac_X550) {
4069                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4070                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4071                 }
4072                 break;
4073         case IXGBE_LINKS_SPEED_1G_82599:
4074                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4075                 break;
4076         case IXGBE_LINKS_SPEED_100_82599:
4077                 *speed = IXGBE_LINK_SPEED_100_FULL;
4078                 if (hw->mac.type == ixgbe_mac_X550) {
4079                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4080                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4081                 }
4082                 break;
4083         case IXGBE_LINKS_SPEED_10_X550EM_A:
4084                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4085                 /* Reserved in older MACs */
4086                 if (hw->mac.type >= ixgbe_mac_X550)
4087                         *speed = IXGBE_LINK_SPEED_10_FULL;
4088                 break;
4089         default:
4090                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4091         }
4092
4093         if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
4094                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
4095                         mac->get_link_status = true;
4096                 else
4097                         mac->get_link_status = false;
4098
4099                 goto out;
4100         }
4101
4102         /* if the read failed it could just be a mailbox collision, best wait
4103          * until we are called again and don't report an error
4104          */
4105         if (mbx->ops.read(hw, &in_msg, 1, 0))
4106                 goto out;
4107
4108         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
4109                 /* msg is not CTS; if it is a NACK, we must have lost CTS status */
4110                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
4111                         mac->get_link_status = false;
4112                 goto out;
4113         }
4114
4115         /* the PF is talking; if we timed out in the past, we reinit */
4116         if (!mbx->timeout) {
4117                 ret_val = -1;
4118                 goto out;
4119         }
4120
4121         /* if we passed all the tests above then the link is up and we no
4122          * longer need to check for link
4123          */
4124         mac->get_link_status = false;
4125
4126 out:
4127         *link_up = !mac->get_link_status;
4128         return ret_val;
4129 }
4130
4131 /*
4132  * If @timeout_ms is 0, this function does not return until the link setup
4133  * thread completes. It returns 1 on completion and 0 on timeout.
4134  */
4135 static int
4136 ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms)
4137 {
4138 #define WARNING_TIMEOUT    9000 /* 9s  in total */
4139         struct ixgbe_adapter *ad = dev->data->dev_private;
4140         uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
4141
4142         while (rte_atomic32_read(&ad->link_thread_running)) {
4143                 msec_delay(1);
4144                 timeout--;
4145
4146                 if (timeout_ms) {
4147                         if (!timeout)
4148                                 return 0;
4149                 } else if (!timeout) {
4150                         /* Keep waiting until link setup completes */
4151                         timeout = WARNING_TIMEOUT;
4152                         PMD_DRV_LOG(ERR, "IXGBE link thread is taking too long to complete!");
4153                 }
4154         }
4155
4156         return 1;
4157 }
4158
4159 static void *
4160 ixgbe_dev_setup_link_thread_handler(void *param)
4161 {
4162         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4163         struct ixgbe_adapter *ad = dev->data->dev_private;
4164         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4165         struct ixgbe_interrupt *intr =
4166                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4167         u32 speed;
4168         bool autoneg = false;
4169
4170         pthread_detach(pthread_self());
4171         speed = hw->phy.autoneg_advertised;
4172         if (!speed)
4173                 ixgbe_get_link_capabilities(hw, &speed, &autoneg);
4174
4175         ixgbe_setup_link(hw, speed, true);
4176
4177         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4178         rte_atomic32_clear(&ad->link_thread_running);
4179         return NULL;
4180 }
4181
4182 /*
4183  * In a FreeBSD environment, nic_uio drivers do not support interrupts,
4184  * so rte_intr_callback_register() will fail to register interrupts.
4185  * We cannot rely on an interrupt callback to move the link status from
4186  * down to up, so we need to wait for the controller to acquire link
4187  * when ports start.
4188  * It returns 0 on link up.
4189  */
4190 static int
4191 ixgbe_wait_for_link_up(struct ixgbe_hw *hw)
4192 {
4193 #ifdef RTE_EXEC_ENV_FREEBSD
4194         int err, i;
4195         bool link_up = false;
4196         uint32_t speed = 0;
4197         const int nb_iter = 25;
4198
4199         for (i = 0; i < nb_iter; i++) {
4200                 err = ixgbe_check_link(hw, &speed, &link_up, 0);
4201                 if (err)
4202                         return err;
4203                 if (link_up)
4204                         return 0;
4205                 msec_delay(200);
4206         }
4207
4208         return 0;
4209 #else
4210         RTE_SET_USED(hw);
4211         return 0;
4212 #endif
4213 }
4214
4215 /* return 0 means link status changed, -1 means not changed */
4216 int
4217 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
4218                             int wait_to_complete, int vf)
4219 {
4220         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4221         struct ixgbe_adapter *ad = dev->data->dev_private;
4222         struct rte_eth_link link;
4223         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4224         struct ixgbe_interrupt *intr =
4225                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4226         bool link_up;
4227         int diag;
4228         int wait = 1;
4229         u32 esdp_reg;
4230
4231         memset(&link, 0, sizeof(link));
4232         link.link_status = ETH_LINK_DOWN;
4233         link.link_speed = ETH_SPEED_NUM_NONE;
4234         link.link_duplex = ETH_LINK_HALF_DUPLEX;
4235         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
4236                         ETH_LINK_SPEED_FIXED);
4237
4238         hw->mac.get_link_status = true;
4239
4240         if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
4241                 return rte_eth_linkstatus_set(dev, &link);
4242
4243         /* no need to wait for completion if the LSC interrupt is enabled */
4244         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
4245                 wait = 0;
4246
4247 /* BSD has no interrupt mechanism, so force NIC status synchronization. */
4248 #ifdef RTE_EXEC_ENV_FREEBSD
4249         wait = 1;
4250 #endif
4251
4252         if (vf)
4253                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
4254         else
4255                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
4256
4257         if (diag != 0) {
4258                 link.link_speed = ETH_SPEED_NUM_100M;
4259                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4260                 return rte_eth_linkstatus_set(dev, &link);
4261         }
4262
4263         if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
4264                 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
4265                 if ((esdp_reg & IXGBE_ESDP_SDP3))
4266                         link_up = 0;
4267         }
4268
4269         if (link_up == 0) {
4270                 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
4271                         ixgbe_dev_wait_setup_link_complete(dev, 0);
4272                         if (rte_atomic32_test_and_set(&ad->link_thread_running)) {
4273                                 /* To avoid race condition between threads, set
4274                                  * the IXGBE_FLAG_NEED_LINK_CONFIG flag only
4275                                  * when there is no link thread running.
4276                                  */
4277                                 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
4278                                 if (rte_ctrl_thread_create(&ad->link_thread_tid,
4279                                         "ixgbe-link-handler",
4280                                         NULL,
4281                                         ixgbe_dev_setup_link_thread_handler,
4282                                         dev) < 0) {
4283                                         PMD_DRV_LOG(ERR,
4284                                                 "Create link thread failed!");
4285                                         rte_atomic32_clear(&ad->link_thread_running);
4286                                 }
4287                         } else {
4288                                 PMD_DRV_LOG(ERR,
4289                                         "Other link thread is running now!");
4290                         }
4291                 }
4292                 return rte_eth_linkstatus_set(dev, &link);
4293         }
4294
4295         link.link_status = ETH_LINK_UP;
4296         link.link_duplex = ETH_LINK_FULL_DUPLEX;
4297
4298         switch (link_speed) {
4299         default:
4300         case IXGBE_LINK_SPEED_UNKNOWN:
4301                 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4302                         hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4303                         link.link_speed = ETH_SPEED_NUM_10M;
4304                 else
4305                         link.link_speed = ETH_SPEED_NUM_100M;
4306                 break;
4307
4308         case IXGBE_LINK_SPEED_10_FULL:
4309                 link.link_speed = ETH_SPEED_NUM_10M;
4310                 break;
4311
4312         case IXGBE_LINK_SPEED_100_FULL:
4313                 link.link_speed = ETH_SPEED_NUM_100M;
4314                 break;
4315
4316         case IXGBE_LINK_SPEED_1GB_FULL:
4317                 link.link_speed = ETH_SPEED_NUM_1G;
4318                 break;
4319
4320         case IXGBE_LINK_SPEED_2_5GB_FULL:
4321                 link.link_speed = ETH_SPEED_NUM_2_5G;
4322                 break;
4323
4324         case IXGBE_LINK_SPEED_5GB_FULL:
4325                 link.link_speed = ETH_SPEED_NUM_5G;
4326                 break;
4327
4328         case IXGBE_LINK_SPEED_10GB_FULL:
4329                 link.link_speed = ETH_SPEED_NUM_10G;
4330                 break;
4331         }
4332
4333         return rte_eth_linkstatus_set(dev, &link);
4334 }
4335
4336 static int
4337 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4338 {
4339         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4340 }
4341
4342 static int
4343 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4344 {
4345         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4346 }
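/*
 * Illustrative sketch (not part of the driver): the two wrappers above
 * back rte_eth_link_get() / rte_eth_link_get_nowait(). A hypothetical
 * non-blocking poll over an assumed `port_id`:
 *
 *     struct rte_eth_link link;
 *     memset(&link, 0, sizeof(link));
 *     rte_eth_link_get_nowait(port_id, &link);  // wait_to_complete == 0
 *     if (link.link_status == ETH_LINK_UP)
 *             printf("link up at %u Mbps\n", link.link_speed);
 *     else
 *             printf("link down\n");
 */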
4347
4348 static int
4349 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4350 {
4351         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4352         uint32_t fctrl;
4353
4354         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4355         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4356         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4357
4358         return 0;
4359 }
4360
4361 static int
4362 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4363 {
4364         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4365         uint32_t fctrl;
4366
4367         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4368         fctrl &= (~IXGBE_FCTRL_UPE);
4369         if (dev->data->all_multicast == 1)
4370                 fctrl |= IXGBE_FCTRL_MPE;
4371         else
4372                 fctrl &= (~IXGBE_FCTRL_MPE);
4373         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4374
4375         return 0;
4376 }
4377
4378 static int
4379 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4380 {
4381         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4382         uint32_t fctrl;
4383
4384         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4385         fctrl |= IXGBE_FCTRL_MPE;
4386         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4387
4388         return 0;
4389 }
4390
4391 static int
4392 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4393 {
4394         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4395         uint32_t fctrl;
4396
4397         if (dev->data->promiscuous == 1)
4398                 return 0; /* must remain in all_multicast mode */
4399
4400         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4401         fctrl &= (~IXGBE_FCTRL_MPE);
4402         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4403
4404         return 0;
4405 }
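/*
 * Illustrative sketch (not part of the driver): the four FCTRL handlers
 * above are reached through the generic ethdev calls, and the disable
 * paths re-check dev->data so that promiscuous and all-multicast modes do
 * not cancel each other. Assumed usage on a configured `port_id`:
 *
 *     rte_eth_promiscuous_enable(port_id);
 *     rte_eth_allmulticast_enable(port_id);
 *     rte_eth_promiscuous_disable(port_id);   // MPE stays set: allmulti on
 *     rte_eth_allmulticast_disable(port_id);  // now MPE is cleared too
 */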
4406
4407 /**
4408  * It clears the interrupt causes and enables the interrupt.
4409  * It is called only once, during NIC initialization.
4410  *
4411  * @param dev
4412  *  Pointer to struct rte_eth_dev.
4413  * @param on
4414  *  Enable or Disable.
4415  *
4416  * @return
4417  *  - On success, zero.
4418  *  - On failure, a negative value.
4419  */
4420 static int
4421 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4422 {
4423         struct ixgbe_interrupt *intr =
4424                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4425
4426         ixgbe_dev_link_status_print(dev);
4427         if (on)
4428                 intr->mask |= IXGBE_EICR_LSC;
4429         else
4430                 intr->mask &= ~IXGBE_EICR_LSC;
4431
4432         return 0;
4433 }
4434
4435 /**
4436  * It clears the interrupt causes and enables the interrupt.
4437  * It is called only once, during NIC initialization.
4438  *
4439  * @param dev
4440  *  Pointer to struct rte_eth_dev.
4441  *
4442  * @return
4443  *  - On success, zero.
4444  *  - On failure, a negative value.
4445  */
4446 static int
4447 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4448 {
4449         struct ixgbe_interrupt *intr =
4450                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4451
4452         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4453
4454         return 0;
4455 }
4456
4457 /**
4458  * It clears the interrupt causes and enables the interrupt.
4459  * It is called only once, during NIC initialization.
4460  *
4461  * @param dev
4462  *  Pointer to struct rte_eth_dev.
4463  *
4464  * @return
4465  *  - On success, zero.
4466  *  - On failure, a negative value.
4467  */
4468 static int
4469 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4470 {
4471         struct ixgbe_interrupt *intr =
4472                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4473
4474         intr->mask |= IXGBE_EICR_LINKSEC;
4475
4476         return 0;
4477 }
4478
4479 /*
4480  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4481  *
4482  * @param dev
4483  *  Pointer to struct rte_eth_dev.
4484  *
4485  * @return
4486  *  - On success, zero.
4487  *  - On failure, a negative value.
4488  */
4489 static int
4490 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4491 {
4492         uint32_t eicr;
4493         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4494         struct ixgbe_interrupt *intr =
4495                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4496
4497         /* clear all cause mask */
4498         ixgbe_disable_intr(hw);
4499
4500         /* read-on-clear nic registers here */
4501         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4502         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4503
4504         intr->flags = 0;
4505
4506         /* set flag for async link update */
4507         if (eicr & IXGBE_EICR_LSC)
4508                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4509
4510         if (eicr & IXGBE_EICR_MAILBOX)
4511                 intr->flags |= IXGBE_FLAG_MAILBOX;
4512
4513         if (eicr & IXGBE_EICR_LINKSEC)
4514                 intr->flags |= IXGBE_FLAG_MACSEC;
4515
4516         if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4517             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4518             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4519                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4520
4521         return 0;
4522 }
4523
4524 /**
4525  * It gets and then prints the link status.
4526  *
4527  * @param dev
4528  *  Pointer to struct rte_eth_dev.
4529  *
4530  * @return
4531  *  void
4533  */
4534 static void
4535 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4536 {
4537         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4538         struct rte_eth_link link;
4539
4540         rte_eth_linkstatus_get(dev, &link);
4541
4542         if (link.link_status) {
4543                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4544                                         (int)(dev->data->port_id),
4545                                         (unsigned)link.link_speed,
4546                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4547                                         "full-duplex" : "half-duplex");
4548         } else {
4549                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4550                                 (int)(dev->data->port_id));
4551         }
4552         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4553                                 pci_dev->addr.domain,
4554                                 pci_dev->addr.bus,
4555                                 pci_dev->addr.devid,
4556                                 pci_dev->addr.function);
4557 }
4558
4559 /*
4560  * It executes link_update after knowing an interrupt occurred.
4561  *
4562  * @param dev
4563  *  Pointer to struct rte_eth_dev.
4564  *
4565  * @return
4566  *  - On success, zero.
4567  *  - On failure, a negative value.
4568  */
4569 static int
4570 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
4571 {
4572         struct ixgbe_interrupt *intr =
4573                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4574         int64_t timeout;
4575         struct ixgbe_hw *hw =
4576                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4577
4578         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4579
4580         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4581                 ixgbe_pf_mbx_process(dev);
4582                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4583         }
4584
4585         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4586                 ixgbe_handle_lasi(hw);
4587                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4588         }
4589
4590         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4591                 struct rte_eth_link link;
4592
4593                 /* get the link status before the update, to predict the transition */
4594                 rte_eth_linkstatus_get(dev, &link);
4595
4596                 ixgbe_dev_link_update(dev, 0);
4597
4598                 /* likely to come up */
4599                 if (!link.link_status)
4600                         /* handle it 1 sec later, waiting for it to stabilize */
4601                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4602                 /* likely to go down */
4603                 else
4604                         /* handle it 4 sec later, waiting for it to stabilize */
4605                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4606
4607                 ixgbe_dev_link_status_print(dev);
4608                 if (rte_eal_alarm_set(timeout * 1000,
4609                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4610                         PMD_DRV_LOG(ERR, "Error setting alarm");
4611                 else {
4612                         /* remember original mask */
4613                         intr->mask_original = intr->mask;
4614                         /* only disable lsc interrupt */
4615                         intr->mask &= ~IXGBE_EIMS_LSC;
4616                 }
4617         }
4618
4619         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4620         ixgbe_enable_intr(dev);
4621
4622         return 0;
4623 }
4624
4625 /**
4626  * Interrupt handler which shall be registered as an alarm callback for
4627  * delayed handling of a specific interrupt, waiting for a stable NIC state.
4628  * As the NIC interrupt state is not stable for ixgbe right after the link
4629  * goes down, it needs to wait 4 seconds to get a stable status.
4630  *
4631  * @param handle
4632  *  Pointer to interrupt handle.
4633  * @param param
4634  *  The address of parameter (struct rte_eth_dev *) registered before.
4635  *
4636  * @return
4637  *  void
4638  */
4639 static void
4640 ixgbe_dev_interrupt_delayed_handler(void *param)
4641 {
4642         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4643         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4644         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4645         struct ixgbe_interrupt *intr =
4646                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4647         struct ixgbe_hw *hw =
4648                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4649         uint32_t eicr;
4650
4651         ixgbe_disable_intr(hw);
4652
4653         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4654         if (eicr & IXGBE_EICR_MAILBOX)
4655                 ixgbe_pf_mbx_process(dev);
4656
4657         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4658                 ixgbe_handle_lasi(hw);
4659                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4660         }
4661
4662         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4663                 ixgbe_dev_link_update(dev, 0);
4664                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4665                 ixgbe_dev_link_status_print(dev);
4666                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4667                                               NULL);
4668         }
4669
4670         if (intr->flags & IXGBE_FLAG_MACSEC) {
4671                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4672                                               NULL);
4673                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4674         }
4675
4676         /* restore original mask */
4677         intr->mask = intr->mask_original;
4678         intr->mask_original = 0;
4679
4680         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4681         ixgbe_enable_intr(dev);
4682         rte_intr_ack(intr_handle);
4683 }
4684
4685 /**
4686  * Interrupt handler triggered by the NIC for handling
4687  * a specific interrupt.
4688  *
4689  * @param handle
4690  *  Pointer to interrupt handle.
4691  * @param param
4692  *  The address of parameter (struct rte_eth_dev *) registered before.
4693  *
4694  * @return
4695  *  void
4696  */
4697 static void
4698 ixgbe_dev_interrupt_handler(void *param)
4699 {
4700         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4701
4702         ixgbe_dev_interrupt_get_status(dev);
4703         ixgbe_dev_interrupt_action(dev);
4704 }
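/*
 * Illustrative sketch (not part of the driver): the LSC and MACSEC events
 * forwarded by the handlers above reach applications that registered an
 * ethdev event callback. A hypothetical callback, registered at init time
 * with intr_conf.lsc = 1 in the port configuration:
 *
 *     static int
 *     on_lsc(uint16_t port_id, enum rte_eth_event_type event,
 *            void *cb_arg, void *ret_param)
 *     {
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             if (event == RTE_ETH_EVENT_INTR_LSC)
 *                     printf("port %u link state changed\n", port_id);
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   on_lsc, NULL);
 */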
4705
4706 static int
4707 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4708 {
4709         struct ixgbe_hw *hw;
4710
4711         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4712         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4713 }
4714
4715 static int
4716 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4717 {
4718         struct ixgbe_hw *hw;
4719
4720         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4721         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4722 }
4723
4724 static int
4725 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4726 {
4727         struct ixgbe_hw *hw;
4728         uint32_t mflcn_reg;
4729         uint32_t fccfg_reg;
4730         int rx_pause;
4731         int tx_pause;
4732
4733         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4734
4735         fc_conf->pause_time = hw->fc.pause_time;
4736         fc_conf->high_water = hw->fc.high_water[0];
4737         fc_conf->low_water = hw->fc.low_water[0];
4738         fc_conf->send_xon = hw->fc.send_xon;
4739         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4740
4741         /*
4742          * Return rx_pause status according to actual setting of
4743          * MFLCN register.
4744          */
4745         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4746         if (mflcn_reg & IXGBE_MFLCN_PMCF)
4747                 fc_conf->mac_ctrl_frame_fwd = 1;
4748         else
4749                 fc_conf->mac_ctrl_frame_fwd = 0;
4750
4751         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4752                 rx_pause = 1;
4753         else
4754                 rx_pause = 0;
4755
4756         /*
4757          * Return tx_pause status according to actual setting of
4758          * FCCFG register.
4759          */
4760         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4761         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4762                 tx_pause = 1;
4763         else
4764                 tx_pause = 0;
4765
4766         if (rx_pause && tx_pause)
4767                 fc_conf->mode = RTE_FC_FULL;
4768         else if (rx_pause)
4769                 fc_conf->mode = RTE_FC_RX_PAUSE;
4770         else if (tx_pause)
4771                 fc_conf->mode = RTE_FC_TX_PAUSE;
4772         else
4773                 fc_conf->mode = RTE_FC_NONE;
4774
4775         return 0;
4776 }
4777
4778 static int
4779 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4780 {
4781         struct ixgbe_hw *hw;
4782         struct ixgbe_adapter *adapter = dev->data->dev_private;
4783         int err;
4784         uint32_t rx_buf_size;
4785         uint32_t max_high_water;
4786         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4787                 ixgbe_fc_none,
4788                 ixgbe_fc_rx_pause,
4789                 ixgbe_fc_tx_pause,
4790                 ixgbe_fc_full
4791         };
4792
4793         PMD_INIT_FUNC_TRACE();
4794
4795         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4796         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4797         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4798
4799         /*
4800          * Reserve at least one Ethernet frame for the watermark;
4801          * high_water/low_water are in kilobytes for ixgbe.
4802          */
4803         max_high_water = (rx_buf_size -
4804                         RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4805         if ((fc_conf->high_water > max_high_water) ||
4806                 (fc_conf->high_water < fc_conf->low_water)) {
4807                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4808                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
4809                 return -EINVAL;
4810         }
4811
4812         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4813         hw->fc.pause_time     = fc_conf->pause_time;
4814         hw->fc.high_water[0]  = fc_conf->high_water;
4815         hw->fc.low_water[0]   = fc_conf->low_water;
4816         hw->fc.send_xon       = fc_conf->send_xon;
4817         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4818         adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
4819
4820         err = ixgbe_flow_ctrl_enable(dev, hw);
4821         if (err < 0) {
4822                 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err);
4823                 return -EIO;
4824         }
4825         return err;
4826 }
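/*
 * Illustrative sketch (not part of the driver): a read-modify-write
 * sequence through the generic flow control API, backed by the get/set
 * handlers above. `port_id` is an assumed configured port:
 *
 *     struct rte_eth_fc_conf fc;
 *     memset(&fc, 0, sizeof(fc));
 *     if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *             fc.mode = RTE_FC_FULL;  // enable both Rx and Tx pause
 *             if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *                     printf("flow control setup failed\n");
 *     }
 */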
4827
4828 /**
4829  *  ixgbe_pfc_enable_generic - Enable flow control
4830  *  @hw: pointer to hardware structure
4831  *  @tc_num: traffic class number
4832  *  Enable flow control according to the current settings.
4833  */
4834 static int
4835 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4836 {
4837         int ret_val = 0;
4838         uint32_t mflcn_reg, fccfg_reg;
4839         uint32_t reg;
4840         uint32_t fcrtl, fcrth;
4841         uint8_t i;
4842         uint8_t nb_rx_en;
4843
4844         /* Validate the water mark configuration */
4845         if (!hw->fc.pause_time) {
4846                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4847                 goto out;
4848         }
4849
4850         /* Low water mark of zero causes XOFF floods */
4851         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4852                 /* High/Low water cannot be 0 */
4853                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4854                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4855                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4856                         goto out;
4857                 }
4858
4859                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4860                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4861                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4862                         goto out;
4863                 }
4864         }
4865         /* Negotiate the fc mode to use */
4866         ixgbe_fc_autoneg(hw);
4867
4868         /* Disable any previous flow control settings */
4869         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4870         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4871
4872         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4873         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4874
4875         switch (hw->fc.current_mode) {
4876         case ixgbe_fc_none:
4877                 /*
4878                  * If more than one RX priority flow control is enabled,
4879                  * TX pause cannot be disabled.
4880                  */
4881                 nb_rx_en = 0;
4882                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4883                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4884                         if (reg & IXGBE_FCRTH_FCEN)
4885                                 nb_rx_en++;
4886                 }
4887                 if (nb_rx_en > 1)
4888                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4889                 break;
4890         case ixgbe_fc_rx_pause:
4891                 /*
4892                  * Rx Flow control is enabled and Tx Flow control is
4893                  * disabled by software override. Since there really
4894                  * isn't a way to advertise that we are capable of RX
4895                  * Pause ONLY, we will advertise that we support both
4896                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4897                  * disable the adapter's ability to send PAUSE frames.
4898                  */
4899                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4900                 /*
4901                  * If more than one RX priority flow control is enabled,
4902                  * TX pause cannot be disabled.
4903                  */
4904                 nb_rx_en = 0;
4905                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4906                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4907                         if (reg & IXGBE_FCRTH_FCEN)
4908                                 nb_rx_en++;
4909                 }
4910                 if (nb_rx_en > 1)
4911                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4912                 break;
4913         case ixgbe_fc_tx_pause:
4914                 /*
4915                  * Tx Flow control is enabled, and Rx Flow control is
4916                  * disabled by software override.
4917                  */
4918                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4919                 break;
4920         case ixgbe_fc_full:
4921                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4922                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4923                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4924                 break;
4925         default:
4926                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4927                 ret_val = IXGBE_ERR_CONFIG;
4928                 goto out;
4929         }
4930
4931         /* Set 802.3x based flow control settings. */
4932         mflcn_reg |= IXGBE_MFLCN_DPF;
4933         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4934         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4935
4936         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4937         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4938                 hw->fc.high_water[tc_num]) {
4939                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4940                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4941                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4942         } else {
4943                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4944                 /*
4945                  * In order to prevent Tx hangs when the internal Tx
4946                  * switch is enabled we must set the high water mark
4947                  * to the maximum FCRTH value.  This allows the Tx
4948                  * switch to function even under heavy Rx workloads.
4949                  */
4950                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4951         }
4952         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4953
4954         /* Configure pause time (2 TCs per register) */
4955         reg = hw->fc.pause_time * 0x00010001;
4956         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4957                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4958
4959         /* Configure flow control refresh threshold value */
4960         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4961
4962 out:
4963         return ret_val;
4964 }
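/*
 * Worked example (values assumed for illustration): the watermarks above
 * are kept in kilobyte units, so the << 10 shift converts them to the
 * byte-granular FCRTL/FCRTH register values. With low_water = 0x40 and
 * high_water = 0x80 for a traffic class:
 *
 *     fcrtl = (0x40 << 10) | IXGBE_FCRTL_XONE;  // 0x10000 bytes, XON enabled
 *     fcrth = (0x80 << 10) | IXGBE_FCRTH_FCEN;  // 0x20000 bytes, FC enabled
 */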
4965
4966 static int
4967 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4968 {
4969         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4970         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4971
4972         if (hw->mac.type != ixgbe_mac_82598EB) {
4973                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4974         }
4975         return ret_val;
4976 }
4977
4978 static int
4979 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4980 {
4981         int err;
4982         uint32_t rx_buf_size;
4983         uint32_t max_high_water;
4984         uint8_t tc_num;
4985         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4986         struct ixgbe_hw *hw =
4987                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4988         struct ixgbe_dcb_config *dcb_config =
4989                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4990
4991         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4992                 ixgbe_fc_none,
4993                 ixgbe_fc_rx_pause,
4994                 ixgbe_fc_tx_pause,
4995                 ixgbe_fc_full
4996         };
4997
4998         PMD_INIT_FUNC_TRACE();
4999
5000         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
5001         tc_num = map[pfc_conf->priority];
5002         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
5003         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
5004         /*
5005          * Reserve at least one Ethernet frame for the watermark;
5006          * high_water/low_water are in kilobytes for ixgbe.
5007          */
5008         max_high_water = (rx_buf_size -
5009                         RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
5010         if ((pfc_conf->fc.high_water > max_high_water) ||
5011             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
5012                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
5013                 PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
5014                 return -EINVAL;
5015         }
5016
5017         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
5018         hw->fc.pause_time = pfc_conf->fc.pause_time;
5019         hw->fc.send_xon = pfc_conf->fc.send_xon;
5020         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
5021         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
5022
5023         err = ixgbe_dcb_pfc_enable(dev, tc_num);
5024
5025         /* Not negotiated is not an error case */
5026         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
5027                 return 0;
5028
5029         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
5030         return -EIO;
5031 }
5032
5033 static int
5034 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
5035                           struct rte_eth_rss_reta_entry64 *reta_conf,
5036                           uint16_t reta_size)
5037 {
5038         uint16_t i, sp_reta_size;
5039         uint8_t j, mask;
5040         uint32_t reta, r;
5041         uint16_t idx, shift;
5042         struct ixgbe_adapter *adapter = dev->data->dev_private;
5043         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5044         uint32_t reta_reg;
5045
5046         PMD_INIT_FUNC_TRACE();
5047
5048         if (!ixgbe_rss_update_sp(hw->mac.type)) {
5049                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
5050                         "NIC.");
5051                 return -ENOTSUP;
5052         }
5053
5054         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5055         if (reta_size != sp_reta_size) {
5056                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
5057                         "(%d) doesn't match the number the hardware can support "
5058                         "(%d)", reta_size, sp_reta_size);
5059                 return -EINVAL;
5060         }
5061
5062         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
5063                 idx = i / RTE_RETA_GROUP_SIZE;
5064                 shift = i % RTE_RETA_GROUP_SIZE;
5065                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
5066                                                 IXGBE_4_BIT_MASK);
5067                 if (!mask)
5068                         continue;
5069                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5070                 if (mask == IXGBE_4_BIT_MASK)
5071                         r = 0;
5072                 else
5073                         r = IXGBE_READ_REG(hw, reta_reg);
5074                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
5075                         if (mask & (0x1 << j))
5076                                 reta |= reta_conf[idx].reta[shift + j] <<
5077                                                         (CHAR_BIT * j);
5078                         else
5079                                 reta |= r & (IXGBE_8_BIT_MASK <<
5080                                                 (CHAR_BIT * j));
5081                 }
5082                 IXGBE_WRITE_REG(hw, reta_reg, reta);
5083         }
5084         adapter->rss_reta_updated = 1;
5085
5086         return 0;
5087 }
5088
5089 static int
5090 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
5091                          struct rte_eth_rss_reta_entry64 *reta_conf,
5092                          uint16_t reta_size)
5093 {
5094         uint16_t i, sp_reta_size;
5095         uint8_t j, mask;
5096         uint32_t reta;
5097         uint16_t idx, shift;
5098         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5099         uint32_t reta_reg;
5100
5101         PMD_INIT_FUNC_TRACE();
5102         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5103         if (reta_size != sp_reta_size) {
5104                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
5105                         "(%d) doesn't match the number the hardware can support "
5106                         "(%d)", reta_size, sp_reta_size);
5107                 return -EINVAL;
5108         }
5109
5110         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
5111                 idx = i / RTE_RETA_GROUP_SIZE;
5112                 shift = i % RTE_RETA_GROUP_SIZE;
5113                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
5114                                                 IXGBE_4_BIT_MASK);
5115                 if (!mask)
5116                         continue;
5117
5118                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5119                 reta = IXGBE_READ_REG(hw, reta_reg);
5120                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
5121                         if (mask & (0x1 << j))
5122                                 reta_conf[idx].reta[shift + j] =
5123                                         ((reta >> (CHAR_BIT * j)) &
5124                                                 IXGBE_8_BIT_MASK);
5125                 }
5126         }
5127
5128         return 0;
5129 }
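/*
 * Illustrative sketch (not part of the driver): RETA entries are updated
 * in groups of 64 through the generic API, each group carrying a mask of
 * the entries to touch. A hypothetical update spreading traffic across
 * two queues, with `reta_size` taken from rte_eth_dev_info_get():
 *
 *     struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
 *     memset(conf, 0, sizeof(conf));
 *     for (uint16_t i = 0; i < reta_size; i++) {
 *             conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % 2;  // alternate between queue 0 and queue 1
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
 */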
5130
5131 static int
5132 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
5133                                 uint32_t index, uint32_t pool)
5134 {
5135         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5136         uint32_t enable_addr = 1;
5137
5138         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
5139                              pool, enable_addr);
5140 }
5141
5142 static void
5143 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
5144 {
5145         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5146
5147         ixgbe_clear_rar(hw, index);
5148 }
5149
5150 static int
5151 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
5152 {
5153         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5154
5155         ixgbe_remove_rar(dev, 0);
5156         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
5157
5158         return 0;
5159 }
5160
5161 static bool
5162 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
5163 {
5164         if (strcmp(dev->device->driver->name, drv->driver.name))
5165                 return false;
5166
5167         return true;
5168 }
5169
5170 bool
5171 is_ixgbe_supported(struct rte_eth_dev *dev)
5172 {
5173         return is_device_supported(dev, &rte_ixgbe_pmd);
5174 }
5175
5176 static int
5177 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
5178 {
5179         uint32_t hlreg0;
5180         uint32_t maxfrs;
5181         struct ixgbe_hw *hw;
5182         struct rte_eth_dev_info dev_info;
5183         uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
5184         struct rte_eth_dev_data *dev_data = dev->data;
5185         int ret;
5186
5187         ret = ixgbe_dev_info_get(dev, &dev_info);
5188         if (ret != 0)
5189                 return ret;
5190
5191         /* check that mtu is within the allowed range */
5192         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
5193                 return -EINVAL;
5194
5195         /* If device is started, refuse mtu that requires the support of
5196          * scattered packets when this feature has not been enabled before.
5197          */
5198         if (dev_data->dev_started && !dev_data->scattered_rx &&
5199             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
5200              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
5201                 PMD_INIT_LOG(ERR, "Stop port first.");
5202                 return -EINVAL;
5203         }
5204
5205         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5206         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5207
5208         /* switch to jumbo mode if needed */
5209         if (frame_size > RTE_ETHER_MAX_LEN) {
5210                 dev->data->dev_conf.rxmode.offloads |=
5211                         DEV_RX_OFFLOAD_JUMBO_FRAME;
5212                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
5213         } else {
5214                 dev->data->dev_conf.rxmode.offloads &=
5215                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
5216                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
5217         }
5218         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5219
5220         /* update max frame size */
5221         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
5222
5223         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
5224         maxfrs &= 0x0000FFFF;
5225         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
5226         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
5227
5228         return 0;
5229 }
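
/*
 * Worked example (illustrative): assuming IXGBE_ETH_OVERHEAD covers the
 * Ethernet header plus CRC (typically 18 bytes), mtu = 9000 gives
 * frame_size = 9018 = 0x233a. That exceeds RTE_ETHER_MAX_LEN, so jumbo mode
 * is enabled via IXGBE_HLREG0_JUMBOEN and MAXFRS[31:16] is set to 0x233a
 * while the low 16 bits of MAXFRS are preserved.
 */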
5230
5231 /*
5232  * Virtual Function operations
5233  */
5234 static void
5235 ixgbevf_intr_disable(struct rte_eth_dev *dev)
5236 {
5237         struct ixgbe_interrupt *intr =
5238                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5239         struct ixgbe_hw *hw =
5240                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5241
5242         PMD_INIT_FUNC_TRACE();
5243
5244         /* Clear the interrupt mask to stop interrupts from being generated */
5245         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
5246
5247         IXGBE_WRITE_FLUSH(hw);
5248
5249         /* Clear mask value. */
5250         intr->mask = 0;
5251 }
5252
5253 static void
5254 ixgbevf_intr_enable(struct rte_eth_dev *dev)
5255 {
5256         struct ixgbe_interrupt *intr =
5257                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5258         struct ixgbe_hw *hw =
5259                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5260
5261         PMD_INIT_FUNC_TRACE();
5262
5263         /* Enable VF interrupts with auto-clear and auto-mask */
5264         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
5265         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
5266         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
5267
5268         IXGBE_WRITE_FLUSH(hw);
5269
5270         /* Save IXGBE_VTEIMS value to mask. */
5271         intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
5272 }
5273
5274 static int
5275 ixgbevf_dev_configure(struct rte_eth_dev *dev)
5276 {
5277         struct rte_eth_conf *conf = &dev->data->dev_conf;
5278         struct ixgbe_adapter *adapter = dev->data->dev_private;
5279
5280         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
5281                      dev->data->port_id);
5282
5283         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
5284                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
5285
5286         /*
5287          * The VF has no ability to enable/disable HW CRC stripping.
5288          * Keep the behavior consistent with the host PF.
5289          */
5290 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
5291         if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
5292                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
5293                 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
5294         }
5295 #else
5296         if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
5297                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
5298                 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
5299         }
5300 #endif
5301
5302         /*
5303          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
5304          * allocation or vector Rx preconditions, it will be reset.
5305          */
5306         adapter->rx_bulk_alloc_allowed = true;
5307         adapter->rx_vec_allowed = true;
5308
5309         return 0;
5310 }
5311
5312 static int
5313 ixgbevf_dev_start(struct rte_eth_dev *dev)
5314 {
5315         struct ixgbe_hw *hw =
5316                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5317         uint32_t intr_vector = 0;
5318         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5319         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5320
5321         int err, mask = 0;
5322
5323         PMD_INIT_FUNC_TRACE();
5324
5325         /* Stop the link setup handler before resetting the HW. */
5326         ixgbe_dev_wait_setup_link_complete(dev, 0);
5327
5328         err = hw->mac.ops.reset_hw(hw);
5329         if (err) {
5330                 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
5331                 return err;
5332         }
5333         hw->mac.get_link_status = true;
5334
5335         /* negotiate mailbox API version to use with the PF. */
5336         ixgbevf_negotiate_api(hw);
5337
5338         ixgbevf_dev_tx_init(dev);
5339
5340         /* This can fail when allocating mbufs for descriptor rings */
5341         err = ixgbevf_dev_rx_init(dev);
5342         if (err) {
5343                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5344                 ixgbe_dev_clear_queues(dev);
5345                 return err;
5346         }
5347
5348         /* Set vfta */
5349         ixgbevf_set_vfta_all(dev, 1);
5350
5351         /* Set HW strip */
5352         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5353                 ETH_VLAN_EXTEND_MASK;
5354         err = ixgbevf_vlan_offload_config(dev, mask);
5355         if (err) {
5356                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
5357                 ixgbe_dev_clear_queues(dev);
5358                 return err;
5359         }
5360
5361         ixgbevf_dev_rxtx_start(dev);
5362
5363         /* check and configure queue intr-vector mapping */
5364         if (rte_intr_cap_multiple(intr_handle) &&
5365             dev->data->dev_conf.intr_conf.rxq) {
5366                 /* According to the datasheet, only vectors 0/1/2 can be
5367                  * used; for now a single vector handles all Rx queues.
5368                  */
5369                 intr_vector = 1;
5370                 if (rte_intr_efd_enable(intr_handle, intr_vector))
5371                         return -1;
5372         }
5373
5374         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5375                 intr_handle->intr_vec =
5376                         rte_zmalloc("intr_vec",
5377                                     dev->data->nb_rx_queues * sizeof(int), 0);
5378                 if (intr_handle->intr_vec == NULL) {
5379                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5380                                      " intr_vec", dev->data->nb_rx_queues);
5381                         return -ENOMEM;
5382                 }
5383         }
5384         ixgbevf_configure_msix(dev);
5385
5386         /* When a VF port is bound to VFIO-PCI, only the miscellaneous
5387          * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
5388          * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init()
5389          * is not cleared, the following rte_intr_enable() will fail when it
5390          * tries to map Rx queue interrupts to other VFIO vectors.
5391          * So clear the uio/vfio intr/eventfd first to avoid failure.
5392          */
5393         rte_intr_disable(intr_handle);
5394
5395         rte_intr_enable(intr_handle);
5396
5397         /* Re-enable interrupt for VF */
5398         ixgbevf_intr_enable(dev);
5399
5400         /*
5401          * Update link status right before return, because it may
5402          * start link configuration process in a separate thread.
5403          */
5404         ixgbevf_dev_link_update(dev, 0);
5405
5406         hw->adapter_stopped = false;
5407
5408         return 0;
5409 }
5410
5411 static void
5412 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5413 {
5414         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5415         struct ixgbe_adapter *adapter = dev->data->dev_private;
5416         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5417         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5418
5419         if (hw->adapter_stopped)
5420                 return;
5421
5422         PMD_INIT_FUNC_TRACE();
5423
5424         ixgbe_dev_wait_setup_link_complete(dev, 0);
5425
5426         ixgbevf_intr_disable(dev);
5427
5428         hw->adapter_stopped = 1;
5429         ixgbe_stop_adapter(hw);
5430
5431         /*
5432          * Clear what we set, but keep shadow_vfta so it can be
5433          * restored after the device starts again.
5434          */
5435         ixgbevf_set_vfta_all(dev, 0);
5436
5437         /* Clear stored conf */
5438         dev->data->scattered_rx = 0;
5439
5440         ixgbe_dev_clear_queues(dev);
5441
5442         /* Clean datapath event and queue/vec mapping */
5443         rte_intr_efd_disable(intr_handle);
5444         if (intr_handle->intr_vec != NULL) {
5445                 rte_free(intr_handle->intr_vec);
5446                 intr_handle->intr_vec = NULL;
5447         }
5448
5449         adapter->rss_reta_updated = 0;
5450 }
5451
5452 static void
5453 ixgbevf_dev_close(struct rte_eth_dev *dev)
5454 {
5455         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5456         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5457         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5458
5459         PMD_INIT_FUNC_TRACE();
5460
5461         ixgbe_reset_hw(hw);
5462
5463         ixgbevf_dev_stop(dev);
5464
5465         ixgbe_dev_free_queues(dev);
5466
5467         /*
5468          * Remove the VF MAC address to ensure
5469          * that the VF traffic goes to the PF
5470          * after stop, close and detach of the VF.
5471          */
5472         ixgbevf_remove_mac_addr(dev, 0);
5473
5474         dev->dev_ops = NULL;
5475         dev->rx_pkt_burst = NULL;
5476         dev->tx_pkt_burst = NULL;
5477
5478         rte_intr_disable(intr_handle);
5479         rte_intr_callback_unregister(intr_handle,
5480                                      ixgbevf_dev_interrupt_handler, dev);
5481 }
5482
5483 /*
5484  * Reset VF device
5485  */
5486 static int
5487 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5488 {
5489         int ret;
5490
5491         ret = eth_ixgbevf_dev_uninit(dev);
5492         if (ret)
5493                 return ret;
5494
5495         ret = eth_ixgbevf_dev_init(dev);
5496
5497         return ret;
5498 }
5499
5500 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5501 {
5502         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5503         struct ixgbe_vfta *shadow_vfta =
5504                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5505         int i = 0, j = 0, vfta = 0, mask = 1;
5506
5507         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5508                 vfta = shadow_vfta->vfta[i];
5509                 if (vfta) {
5510                         mask = 1;
5511                         for (j = 0; j < 32; j++) {
5512                                 if (vfta & mask)
5513                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5514                                                        on, false);
5515                                 mask <<= 1;
5516                         }
5517                 }
5518         }
5519
5520 }
5521
5522 static int
5523 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5524 {
5525         struct ixgbe_hw *hw =
5526                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5527         struct ixgbe_vfta *shadow_vfta =
5528                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5529         uint32_t vid_idx = 0;
5530         uint32_t vid_bit = 0;
5531         int ret = 0;
5532
5533         PMD_INIT_FUNC_TRACE();
5534
5535         /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
5536         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5537         if (ret) {
5538                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5539                 return ret;
5540         }
5541         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5542         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5543
5544         /* Save what we set and restore it after device reset */
5545         if (on)
5546                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5547         else
5548                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5549
5550         return 0;
5551 }
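
/*
 * Worked example (illustrative): for vlan_id = 100, vid_idx =
 * (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4, so bit 4
 * of shadow_vfta->vfta[3] tracks VLAN 100. Word i, bit j corresponds to
 * VLAN (i << 5) + j, the same layout walked by ixgbevf_set_vfta_all() above.
 */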
5552
5553 static void
5554 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5555 {
5556         struct ixgbe_hw *hw =
5557                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5558         uint32_t ctrl;
5559
5560         PMD_INIT_FUNC_TRACE();
5561
5562         if (queue >= hw->mac.max_rx_queues)
5563                 return;
5564
5565         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5566         if (on)
5567                 ctrl |= IXGBE_RXDCTL_VME;
5568         else
5569                 ctrl &= ~IXGBE_RXDCTL_VME;
5570         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5571
5572         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5573 }
5574
5575 static int
5576 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
5577 {
5578         struct ixgbe_rx_queue *rxq;
5579         uint16_t i;
5580         int on = 0;
5581
5582         /* The VF only supports HW VLAN strip; other offloads are not supported */
5583         if (mask & ETH_VLAN_STRIP_MASK) {
5584                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5585                         rxq = dev->data->rx_queues[i];
5586                         on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
5587                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5588                 }
5589         }
5590
5591         return 0;
5592 }
5593
5594 static int
5595 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5596 {
5597         ixgbe_config_vlan_strip_on_all_queues(dev, mask);
5598
5599         ixgbevf_vlan_offload_config(dev, mask);
5600
5601         return 0;
5602 }
5603
5604 int
5605 ixgbe_vt_check(struct ixgbe_hw *hw)
5606 {
5607         uint32_t reg_val;
5608
5609         /* check whether Virtualization Technology is enabled */
5610         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5611         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5612                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5613                 return -1;
5614         }
5615
5616         return 0;
5617 }
5618
5619 static uint32_t
5620 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr)
5621 {
5622         uint32_t vector = 0;
5623
5624         switch (hw->mac.mc_filter_type) {
5625         case 0:   /* use bits [47:36] of the address */
5626                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5627                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5628                 break;
5629         case 1:   /* use bits [46:35] of the address */
5630                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5631                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5632                 break;
5633         case 2:   /* use bits [45:34] of the address */
5634                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5635                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5636                 break;
5637         case 3:   /* use bits [43:32] of the address */
5638                 vector = ((uc_addr->addr_bytes[4]) |
5639                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5640                 break;
5641         default:  /* Invalid mc_filter_type */
5642                 break;
5643         }
5644
5645         /* the vector can only be 12 bits wide or the boundary will be exceeded */
5646         vector &= 0xFFF;
5647         return vector;
5648 }
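
/*
 * Worked example (illustrative): with mc_filter_type 0 the 12-bit vector is
 * taken from bits [47:36] of the address. For a MAC address ending in
 * addr_bytes[4] = 0x4a, addr_bytes[5] = 0xb5:
 *   vector = (0x4a >> 4) | (0xb5 << 4) = 0x004 | 0xb50 = 0xb54.
 */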
5649
5650 static int
5651 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
5652                         struct rte_ether_addr *mac_addr, uint8_t on)
5653 {
5654         uint32_t vector;
5655         uint32_t uta_idx;
5656         uint32_t reg_val;
5657         uint32_t uta_shift;
5658         uint32_t rc;
5659         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5660         const uint32_t ixgbe_uta_bit_shift = 5;
5661         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5662         const uint32_t bit1 = 0x1;
5663
5664         struct ixgbe_hw *hw =
5665                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5666         struct ixgbe_uta_info *uta_info =
5667                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5668
5669         /* The UTA table only exists on 82599 hardware and newer */
5670         if (hw->mac.type < ixgbe_mac_82599EB)
5671                 return -ENOTSUP;
5672
5673         vector = ixgbe_uta_vector(hw, mac_addr);
5674         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5675         uta_shift = vector & ixgbe_uta_bit_mask;
5676
5677         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5678         if (rc == on)
5679                 return 0;
5680
5681         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5682         if (on) {
5683                 uta_info->uta_in_use++;
5684                 reg_val |= (bit1 << uta_shift);
5685                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5686         } else {
5687                 uta_info->uta_in_use--;
5688                 reg_val &= ~(bit1 << uta_shift);
5689                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5690         }
5691
5692         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5693
5694         if (uta_info->uta_in_use > 0)
5695                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5696                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5697         else
5698                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5699
5700         return 0;
5701 }
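
/*
 * Continuing the example above (illustrative): vector 0xb54 decomposes into
 * uta_idx = (0xb54 >> 5) & 0x7F = 90 and uta_shift = 0xb54 & 0x1F = 20, so
 * the address is tracked by bit 20 of IXGBE_UTA(90) and of the shadow copy.
 */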
5702
5703 static int
5704 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5705 {
5706         int i;
5707         struct ixgbe_hw *hw =
5708                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5709         struct ixgbe_uta_info *uta_info =
5710                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5711
5712         /* The UTA table only exists on 82599 hardware and newer */
5713         if (hw->mac.type < ixgbe_mac_82599EB)
5714                 return -ENOTSUP;
5715
5716         if (on) {
5717                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5718                         uta_info->uta_shadow[i] = ~0;
5719                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5720                 }
5721         } else {
5722                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5723                         uta_info->uta_shadow[i] = 0;
5724                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5725                 }
5726         }
5727         return 0;
5728
5729 }
5730
5731 uint32_t
5732 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5733 {
5734         uint32_t new_val = orig_val;
5735
5736         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5737                 new_val |= IXGBE_VMOLR_AUPE;
5738         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5739                 new_val |= IXGBE_VMOLR_ROMPE;
5740         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5741                 new_val |= IXGBE_VMOLR_ROPE;
5742         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5743                 new_val |= IXGBE_VMOLR_BAM;
5744         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5745                 new_val |= IXGBE_VMOLR_MPE;
5746
5747         return new_val;
5748 }
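
/*
 * Usage sketch (illustrative): a pool that should accept untagged and
 * broadcast frames would pass rx_mask = ETH_VMDQ_ACCEPT_UNTAG |
 * ETH_VMDQ_ACCEPT_BROADCAST, yielding orig_val | IXGBE_VMOLR_AUPE |
 * IXGBE_VMOLR_BAM for the caller to write back to the pool's VMOLR.
 */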
5749
5750 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5751 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5752 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5753 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5754 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5755         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5756         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5757
5758 static int
5759 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5760                       struct rte_eth_mirror_conf *mirror_conf,
5761                       uint8_t rule_id, uint8_t on)
5762 {
5763         uint32_t mr_ctl, vlvf;
5764         uint32_t mp_lsb = 0;
5765         uint32_t mv_msb = 0;
5766         uint32_t mv_lsb = 0;
5767         uint32_t mp_msb = 0;
5768         uint8_t i = 0;
5769         int reg_index = 0;
5770         uint64_t vlan_mask = 0;
5771
5772         const uint8_t pool_mask_offset = 32;
5773         const uint8_t vlan_mask_offset = 32;
5774         const uint8_t dst_pool_offset = 8;
5775         const uint8_t rule_mr_offset  = 4;
5776         const uint8_t mirror_rule_mask = 0x0F;
5777
5778         struct ixgbe_mirror_info *mr_info =
5779                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5780         struct ixgbe_hw *hw =
5781                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5782         uint8_t mirror_type = 0;
5783
5784         if (ixgbe_vt_check(hw) < 0)
5785                 return -ENOTSUP;
5786
5787         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5788                 return -EINVAL;
5789
5790         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5791                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5792                             mirror_conf->rule_type);
5793                 return -EINVAL;
5794         }
5795
5796         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5797                 mirror_type |= IXGBE_MRCTL_VLME;
5798                 /* Check if the VLAN ID is valid and find the corresponding
5799                  * VLAN ID index in VLVF
5800                  */
5801                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5802                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5803                                 /* search vlan id related pool vlan filter
5804                                  * index
5805                                  */
5806                                 reg_index = ixgbe_find_vlvf_slot(
5807                                                 hw,
5808                                                 mirror_conf->vlan.vlan_id[i],
5809                                                 false);
5810                                 if (reg_index < 0)
5811                                         return -EINVAL;
5812                                 vlvf = IXGBE_READ_REG(hw,
5813                                                       IXGBE_VLVF(reg_index));
5814                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5815                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5816                                       mirror_conf->vlan.vlan_id[i]))
5817                                         vlan_mask |= (1ULL << reg_index);
5818                                 else
5819                                         return -EINVAL;
5820                         }
5821                 }
5822
5823                 if (on) {
5824                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5825                         mv_msb = vlan_mask >> vlan_mask_offset;
5826
5827                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5828                                                 mirror_conf->vlan.vlan_mask;
5829                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5830                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5831                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5832                                                 mirror_conf->vlan.vlan_id[i];
5833                         }
5834                 } else {
5835                         mv_lsb = 0;
5836                         mv_msb = 0;
5837                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5838                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5839                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5840                 }
5841         }
5842
5843         /*
5844          * If pool mirroring is enabled, write the related pool mask register;
5845          * if it is disabled, clear the PFMRVM register.
5846          */
5847         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5848                 mirror_type |= IXGBE_MRCTL_VPME;
5849                 if (on) {
5850                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5851                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5852                         mr_info->mr_conf[rule_id].pool_mask =
5853                                         mirror_conf->pool_mask;
5854
5855                 } else {
5856                         mp_lsb = 0;
5857                         mp_msb = 0;
5858                         mr_info->mr_conf[rule_id].pool_mask = 0;
5859                 }
5860         }
5861         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5862                 mirror_type |= IXGBE_MRCTL_UPME;
5863         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5864                 mirror_type |= IXGBE_MRCTL_DPME;
5865
5866         /* read the mirror control register and recalculate it */
5867         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5868
5869         if (on) {
5870                 mr_ctl |= mirror_type;
5871                 mr_ctl &= mirror_rule_mask;
5872                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5873         } else {
5874                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5875         }
5876
5877         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5878         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5879
5880         /* write the mirror control register */
5881         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5882
5883         /* write the pool mirror control register */
5884         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5885                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5886                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5887                                 mp_msb);
5888         }
5889         /* write the VLAN mirror control register */
5890         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5891                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5892                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5893                                 mv_msb);
5894         }
5895
5896         return 0;
5897 }
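
/*
 * Illustrative note: the 64-bit pool and VLAN masks are programmed as two
 * 32-bit halves, with the upper half at register index
 * rule_id + rule_mr_offset (4). E.g. for rule_id = 1, pool_mask bits [31:0]
 * go to IXGBE_VMRVM(1) and bits [63:32] to IXGBE_VMRVM(5).
 */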
5898
5899 static int
5900 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5901 {
5902         int mr_ctl = 0;
5903         uint32_t lsb_val = 0;
5904         uint32_t msb_val = 0;
5905         const uint8_t rule_mr_offset = 4;
5906
5907         struct ixgbe_hw *hw =
5908                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5909         struct ixgbe_mirror_info *mr_info =
5910                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5911
5912         if (ixgbe_vt_check(hw) < 0)
5913                 return -ENOTSUP;
5914
5915         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5916                 return -EINVAL;
5917
5918         memset(&mr_info->mr_conf[rule_id], 0,
5919                sizeof(struct rte_eth_mirror_conf));
5920
5921         /* clear PFVMCTL register */
5922         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5923
5924         /* clear pool mask register */
5925         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5926         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5927
5928         /* clear vlan mask register */
5929         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5930         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5931
5932         return 0;
5933 }
5934
5935 static int
5936 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5937 {
5938         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5939         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5940         struct ixgbe_interrupt *intr =
5941                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5942         struct ixgbe_hw *hw =
5943                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5944         uint32_t vec = IXGBE_MISC_VEC_ID;
5945
5946         if (rte_intr_allow_others(intr_handle))
5947                 vec = IXGBE_RX_VEC_START;
5948         intr->mask |= (1 << vec);
5949         RTE_SET_USED(queue_id);
5950         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5951
5952         rte_intr_ack(intr_handle);
5953
5954         return 0;
5955 }
5956
5957 static int
5958 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5959 {
5960         struct ixgbe_interrupt *intr =
5961                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5962         struct ixgbe_hw *hw =
5963                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5964         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5965         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5966         uint32_t vec = IXGBE_MISC_VEC_ID;
5967
5968         if (rte_intr_allow_others(intr_handle))
5969                 vec = IXGBE_RX_VEC_START;
5970         intr->mask &= ~(1 << vec);
5971         RTE_SET_USED(queue_id);
5972         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5973
5974         return 0;
5975 }
5976
5977 static int
5978 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5979 {
5980         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5981         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5982         uint32_t mask;
5983         struct ixgbe_hw *hw =
5984                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5985         struct ixgbe_interrupt *intr =
5986                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5987
5988         if (queue_id < 16) {
5989                 ixgbe_disable_intr(hw);
5990                 intr->mask |= (1 << queue_id);
5991                 ixgbe_enable_intr(dev);
5992         } else if (queue_id < 32) {
5993                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5994                 mask |= (1 << queue_id);
5995                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5996         } else if (queue_id < 64) {
5997                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5998                 mask |= (1 << (queue_id - 32));
5999                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
6000         }
6001         rte_intr_ack(intr_handle);
6002
6003         return 0;
6004 }
6005
6006 static int
6007 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
6008 {
6009         uint32_t mask;
6010         struct ixgbe_hw *hw =
6011                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6012         struct ixgbe_interrupt *intr =
6013                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
6014
6015         if (queue_id < 16) {
6016                 ixgbe_disable_intr(hw);
6017                 intr->mask &= ~(1 << queue_id);
6018                 ixgbe_enable_intr(dev);
6019         } else if (queue_id < 32) {
6020                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
6021                 mask &= ~(1 << queue_id);
6022                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
6023         } else if (queue_id < 64) {
6024                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
6025                 mask &= ~(1 << (queue_id - 32));
6026                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
6027         }
6028
6029         return 0;
6030 }
6031
6032 static void
6033 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
6034                      uint8_t queue, uint8_t msix_vector)
6035 {
6036         uint32_t tmp, idx;
6037
6038         if (direction == -1) {
6039                 /* other causes */
6040                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
6041                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
6042                 tmp &= ~0xFF;
6043                 tmp |= msix_vector;
6044                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
6045         } else {
6046                 /* rx or tx cause */
6047                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
6048                 idx = ((16 * (queue & 1)) + (8 * direction));
6049                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
6050                 tmp &= ~(0xFF << idx);
6051                 tmp |= (msix_vector << idx);
6052                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
6053         }
6054 }
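
/*
 * Worked example (illustrative): each VTIVAR register covers two queues,
 * with an 8-bit entry per cause. For queue = 3, direction = 1 (Tx):
 * idx = 16 * (3 & 1) + 8 * 1 = 24, so the vector lands in bits [31:24] of
 * IXGBE_VTIVAR(3 >> 1) = IXGBE_VTIVAR(1).
 */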
6055
6056 /**
6057  * Set the IVAR registers, mapping interrupt causes to vectors
6058  * @param hw
6059  *  pointer to ixgbe_hw struct
6060  * @param direction
6061  *  0 for Rx, 1 for Tx, -1 for other causes
6062  * @param queue
6063  *  queue to map the corresponding interrupt to
6064  * @param msix_vector
6065  *  the vector to map to the corresponding queue
6066  */
6067 static void
6068 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
6069                    uint8_t queue, uint8_t msix_vector)
6070 {
6071         uint32_t tmp, idx;
6072
6073         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
6074         if (hw->mac.type == ixgbe_mac_82598EB) {
6075                 if (direction == -1)
6076                         direction = 0;
6077                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
6078                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
6079                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
6080                 tmp |= (msix_vector << (8 * (queue & 0x3)));
6081                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
6082         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
6083                         (hw->mac.type == ixgbe_mac_X540) ||
6084                         (hw->mac.type == ixgbe_mac_X550) ||
6085                         (hw->mac.type == ixgbe_mac_X550EM_x)) {
6086                 if (direction == -1) {
6087                         /* other causes */
6088                         idx = ((queue & 1) * 8);
6089                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
6090                         tmp &= ~(0xFF << idx);
6091                         tmp |= (msix_vector << idx);
6092                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
6093                 } else {
6094                         /* rx or tx causes */
6095                         idx = ((16 * (queue & 1)) + (8 * direction));
6096                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
6097                         tmp &= ~(0xFF << idx);
6098                         tmp |= (msix_vector << idx);
6099                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
6100                 }
6101         }
6102 }
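
/*
 * Illustrative note: on 82598 each IVAR register packs four 8-bit entries
 * indexed by (((direction * 64) + queue) >> 2) & 0x1F, whereas on
 * 82599/X540/X550 the layout matches the VF variant above: two queues per
 * register, Rx entries at bits [7:0]/[23:16] and Tx at [15:8]/[31:24].
 */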
6103
6104 static void
6105 ixgbevf_configure_msix(struct rte_eth_dev *dev)
6106 {
6107         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6108         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
6109         struct ixgbe_hw *hw =
6110                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6111         uint32_t q_idx;
6112         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
6113         uint32_t base = IXGBE_MISC_VEC_ID;
6114
6115         /* Configure VF other cause ivar */
6116         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
6117
6118         /* Don't configure the MSI-X register if no mapping is done
6119          * between the intr vector and the event fd.
6120          */
6121         if (!rte_intr_dp_is_en(intr_handle))
6122                 return;
6123
6124         if (rte_intr_allow_others(intr_handle)) {
6125                 base = IXGBE_RX_VEC_START;
6126                 vector_idx = IXGBE_RX_VEC_START;
6127         }
6128
6129         /* Configure all RX queues of VF */
6130         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
6131                 /* Force all queues to use vector 0,
6132                  * as IXGBE_VF_MAXMSIVECTOR = 1
6133                  */
6134                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
6135                 intr_handle->intr_vec[q_idx] = vector_idx;
6136                 if (vector_idx < base + intr_handle->nb_efd - 1)
6137                         vector_idx++;
6138         }
6139
6140         /* As the RX queue settings above show, all queues use vector 0.
6141          * Set only the ITR value of IXGBE_MISC_VEC_ID.
6142          */
6143         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
6144                         IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6145                         | IXGBE_EITR_CNT_WDIS);
6146 }
6147
6148 /**
6149  * Sets up the hardware to properly generate MSI-X interrupts
6150  * @param dev
6151  *  pointer to the eth_dev whose private data holds the board structure
6152  */
6153 static void
6154 ixgbe_configure_msix(struct rte_eth_dev *dev)
6155 {
6156         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6157         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
6158         struct ixgbe_hw *hw =
6159                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6160         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
6161         uint32_t vec = IXGBE_MISC_VEC_ID;
6162         uint32_t mask;
6163         uint32_t gpie;
6164
6165         /* Don't configure the MSI-X register if no mapping is done
6166          * between the intr vector and the event fd; but if MSI-X has
6167          * already been enabled, auto clear, auto mask and throttling
6168          * still need to be configured.
6169          */
6170         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6171         if (!rte_intr_dp_is_en(intr_handle) &&
6172             !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
6173                 return;
6174
6175         if (rte_intr_allow_others(intr_handle))
6176                 vec = base = IXGBE_RX_VEC_START;
6177
6178         /* setup GPIE for MSI-x mode */
6179         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6180         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
6181                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
6182         /* auto clearing and auto setting of the corresponding bits in
6183          * EIMS when an MSI-X interrupt is triggered
6184          */
6185         if (hw->mac.type == ixgbe_mac_82598EB) {
6186                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
6187         } else {
6188                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
6189                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
6190         }
6191         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
6192
6193         /* Populate the IVAR table and set the ITR values to the
6194          * corresponding register.
6195          */
6196         if (rte_intr_dp_is_en(intr_handle)) {
6197                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
6198                         queue_id++) {
6199                         /* by default, 1:1 mapping */
6200                         ixgbe_set_ivar_map(hw, 0, queue_id, vec);
6201                         intr_handle->intr_vec[queue_id] = vec;
6202                         if (vec < base + intr_handle->nb_efd - 1)
6203                                 vec++;
6204                 }
6205
6206                 switch (hw->mac.type) {
6207                 case ixgbe_mac_82598EB:
6208                         ixgbe_set_ivar_map(hw, -1,
6209                                            IXGBE_IVAR_OTHER_CAUSES_INDEX,
6210                                            IXGBE_MISC_VEC_ID);
6211                         break;
6212                 case ixgbe_mac_82599EB:
6213                 case ixgbe_mac_X540:
6214                 case ixgbe_mac_X550:
6215                 case ixgbe_mac_X550EM_x:
6216                         ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
6217                         break;
6218                 default:
6219                         break;
6220                 }
6221         }
6222         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
6223                         IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6224                         | IXGBE_EITR_CNT_WDIS);
6225
6226         /* set up to autoclear timer, and the vectors */
6227         mask = IXGBE_EIMS_ENABLE_MASK;
6228         mask &= ~(IXGBE_EIMS_OTHER |
6229                   IXGBE_EIMS_MAILBOX |
6230                   IXGBE_EIMS_LSC);
6231
6232         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
6233 }
6234
6235 int
6236 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
6237                            uint16_t queue_idx, uint16_t tx_rate)
6238 {
6239         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6240         struct rte_eth_rxmode *rxmode;
6241         uint32_t rf_dec, rf_int;
6242         uint32_t bcnrc_val;
6243         uint16_t link_speed = dev->data->dev_link.link_speed;
6244
6245         if (queue_idx >= hw->mac.max_tx_queues)
6246                 return -EINVAL;
6247
6248         if (tx_rate != 0) {
6249                 /* Calculate the rate factor values to set */
6250                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
6251                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
6252                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
6253
6254                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
6255                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
6256                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
6257                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
6258         } else {
6259                 bcnrc_val = 0;
6260         }
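
        /*
         * Worked example (illustrative, assuming IXGBE_RTTBCNRC_RF_INT_SHIFT
         * is 14): link_speed = 10000 Mbps and tx_rate = 3000 Mbps give
         * rf_int = 3 and rf_dec = (1000 << 14) / 3000 = 5461, i.e. a rate
         * factor of 3 + 5461/16384 ~= 3.333 = 10000/3000.
         */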
6261
6262         rxmode = &dev->data->dev_conf.rxmode;
6263         /*
6264          * Set the global transmit compensation time to the MMW_SIZE in the
6265          * RTTBCNRM register: MMW_SIZE = 0x14 if 9728-byte jumbo frames are
6266          * supported, otherwise 0x4.
6267          */
6268         if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
6269             (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
6270                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6271                         IXGBE_MMW_SIZE_JUMBO_FRAME);
6272         else
6273                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6274                         IXGBE_MMW_SIZE_DEFAULT);
6275
6276         /* Set RTTBCNRC of queue X */
6277         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
6278         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
6279         IXGBE_WRITE_FLUSH(hw);
6280
6281         return 0;
6282 }
6283
6284 static int
6285 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
6286                      __rte_unused uint32_t index,
6287                      __rte_unused uint32_t pool)
6288 {
6289         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6290         int diag;
6291
6292         /*
6293          * On an 82599 VF, adding the same MAC address again is not an
6294          * idempotent operation. Trap this case to avoid exhausting the [very
6295          * limited] set of PF resources used to store VF MAC addresses.
6296          */
6297         if (memcmp(hw->mac.perm_addr, mac_addr,
6298                         sizeof(struct rte_ether_addr)) == 0)
6299                 return -1;
6300         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6301         if (diag != 0)
6302                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
6303                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
6304                             mac_addr->addr_bytes[0],
6305                             mac_addr->addr_bytes[1],
6306                             mac_addr->addr_bytes[2],
6307                             mac_addr->addr_bytes[3],
6308                             mac_addr->addr_bytes[4],
6309                             mac_addr->addr_bytes[5],
6310                             diag);
6311         return diag;
6312 }
6313
6314 static void
6315 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
6316 {
6317         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6318         struct rte_ether_addr *perm_addr =
6319                 (struct rte_ether_addr *)hw->mac.perm_addr;
6320         struct rte_ether_addr *mac_addr;
6321         uint32_t i;
6322         int diag;
6323
6324         /*
6325          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
6326          * not support the deletion of a given MAC address.
6327          * Instead, it requires deleting all MAC addresses, then re-adding
6328          * all of them except the one to be deleted.
6329          */
6330         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
6331
6332         /*
6333          * Re-add all MAC addresses, except the deleted one
6334          * and the permanent MAC address.
6335          */
6336         for (i = 0, mac_addr = dev->data->mac_addrs;
6337              i < hw->mac.num_rar_entries; i++, mac_addr++) {
6338                 /* Skip the deleted MAC address */
6339                 if (i == index)
6340                         continue;
6341                 /* Skip NULL MAC addresses */
6342                 if (rte_is_zero_ether_addr(mac_addr))
6343                         continue;
6344                 /* Skip the permanent MAC address */
6345                 if (memcmp(perm_addr, mac_addr,
6346                                 sizeof(struct rte_ether_addr)) == 0)
6347                         continue;
6348                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6349                 if (diag != 0)
6350                         PMD_DRV_LOG(ERR,
6351                                     "Re-adding MAC address "
6352                                     "%02x:%02x:%02x:%02x:%02x:%02x failed, "
6353                                     "diag=%d",
6354                                     mac_addr->addr_bytes[0],
6355                                     mac_addr->addr_bytes[1],
6356                                     mac_addr->addr_bytes[2],
6357                                     mac_addr->addr_bytes[3],
6358                                     mac_addr->addr_bytes[4],
6359                                     mac_addr->addr_bytes[5],
6360                                     diag);
6361         }
6362 }
6363
6364 static int
6365 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
6366                         struct rte_ether_addr *addr)
6367 {
6368         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6369
6370         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
6371
6372         return 0;
6373 }
6374
6375 int
6376 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
6377                         struct rte_eth_syn_filter *filter,
6378                         bool add)
6379 {
6380         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6381         struct ixgbe_filter_info *filter_info =
6382                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6383         uint32_t syn_info;
6384         uint32_t synqf;
6385
6386         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6387                 return -EINVAL;
6388
6389         syn_info = filter_info->syn_info;
6390
6391         if (add) {
6392                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
6393                         return -EINVAL;
6394                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
6395                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6396
6397                 if (filter->hig_pri)
6398                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
6399                 else
6400                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6401         } else {
6402                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6403                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6404                         return -ENOENT;
6405                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6406         }
6407
6408         filter_info->syn_info = synqf;
6409         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6410         IXGBE_WRITE_FLUSH(hw);
6411         return 0;
6412 }
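
/*
 * Worked example (illustrative): adding a SYN filter that steers TCP SYN
 * packets to queue 5 with normal priority programs SYNQF =
 * (5 << IXGBE_SYN_FILTER_QUEUE_SHIFT) | IXGBE_SYN_FILTER_ENABLE; the get
 * path below recovers the queue by shifting the QUEUE field back down.
 */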
6413
6414 static int
6415 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
6416                         struct rte_eth_syn_filter *filter)
6417 {
6418         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6419         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6420
6421         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
6422                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
6423                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
6424                 return 0;
6425         }
6426         return -ENOENT;
6427 }
6428
6429 static int
6430 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
6431                         enum rte_filter_op filter_op,
6432                         void *arg)
6433 {
6434         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6435         int ret;
6436
6437         MAC_TYPE_FILTER_SUP(hw->mac.type);
6438
6439         if (filter_op == RTE_ETH_FILTER_NOP)
6440                 return 0;
6441
6442         if (arg == NULL) {
6443                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
6444                             filter_op);
6445                 return -EINVAL;
6446         }
6447
6448         switch (filter_op) {
6449         case RTE_ETH_FILTER_ADD:
6450                 ret = ixgbe_syn_filter_set(dev,
6451                                 (struct rte_eth_syn_filter *)arg,
6452                                 TRUE);
6453                 break;
6454         case RTE_ETH_FILTER_DELETE:
6455                 ret = ixgbe_syn_filter_set(dev,
6456                                 (struct rte_eth_syn_filter *)arg,
6457                                 FALSE);
6458                 break;
6459         case RTE_ETH_FILTER_GET:
6460                 ret = ixgbe_syn_filter_get(dev,
6461                                 (struct rte_eth_syn_filter *)arg);
6462                 break;
6463         default:
6464                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
6465                 ret = -EINVAL;
6466                 break;
6467         }
6468
6469         return ret;
6470 }
6471
6472
6473 static inline enum ixgbe_5tuple_protocol
6474 convert_protocol_type(uint8_t protocol_value)
6475 {
6476         if (protocol_value == IPPROTO_TCP)
6477                 return IXGBE_FILTER_PROTOCOL_TCP;
6478         else if (protocol_value == IPPROTO_UDP)
6479                 return IXGBE_FILTER_PROTOCOL_UDP;
6480         else if (protocol_value == IPPROTO_SCTP)
6481                 return IXGBE_FILTER_PROTOCOL_SCTP;
6482         else
6483                 return IXGBE_FILTER_PROTOCOL_NONE;
6484 }
6485
6486 /* inject a 5-tuple filter to HW */
6487 static inline void
6488 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6489                            struct ixgbe_5tuple_filter *filter)
6490 {
6491         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6492         int i;
6493         uint32_t ftqf, sdpqf;
6494         uint32_t l34timir = 0;
6495         uint8_t mask = 0xff;
6496
6497         i = filter->index;
6498
6499         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6500                                 IXGBE_SDPQF_DSTPORT_SHIFT);
6501         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6502
6503         ftqf = (uint32_t)(filter->filter_info.proto &
6504                 IXGBE_FTQF_PROTOCOL_MASK);
6505         ftqf |= (uint32_t)((filter->filter_info.priority &
6506                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
6507         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6508                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6509         if (filter->filter_info.dst_ip_mask == 0)
6510                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6511         if (filter->filter_info.src_port_mask == 0)
6512                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6513         if (filter->filter_info.dst_port_mask == 0)
6514                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
6515         if (filter->filter_info.proto_mask == 0)
6516                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6517         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6518         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6519         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6520
6521         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6522         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6523         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6524         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6525
6526         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6527         l34timir |= (uint32_t)(filter->queue <<
6528                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6529         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6530 }
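
/*
 * Illustrative note on the FTQF 5-tuple mask: it starts at 0xff (all fields
 * ignored) and a bit is cleared for each field that must be compared (the
 * filter_info convention is "*_mask == 0 means compare"). E.g. a filter
 * matching only dst_ip and dst_port leaves the source address, source port
 * and protocol mask bits set, so those fields are ignored by the hardware.
 */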
6531
6532 /*
6533  * add a 5-tuple filter
6534  *
6535  * @param
6536  * dev: Pointer to struct rte_eth_dev.
6537  * index: the index allocated for the filter (stored in filter->index).
6538  * filter: pointer to the filter that will be added.
6539  * rx_queue: the queue id the filter is assigned to (filter->queue).
6540  *
6541  * @return
6542  *    - On success, zero.
6543  *    - On failure, a negative value.
6544  */
6545 static int
6546 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6547                         struct ixgbe_5tuple_filter *filter)
6548 {
6549         struct ixgbe_filter_info *filter_info =
6550                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6551         int i, idx, shift;
6552
6553         /*
6554          * look for an unused 5-tuple filter index,
6555          * and insert the filter into the list.
6556          */
6557         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6558                 idx = i / (sizeof(uint32_t) * NBBY);
6559                 shift = i % (sizeof(uint32_t) * NBBY);
6560                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6561                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6562                         filter->index = i;
6563                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6564                                           filter,
6565                                           entries);
6566                         break;
6567                 }
6568         }
6569         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6570                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6571                 return -ENOSYS;
6572         }
6573
6574         ixgbe_inject_5tuple_filter(dev, filter);
6575
6576         return 0;
6577 }
6578
6579 /*
6580  * remove a 5-tuple filter
6581  *
6582  * @param
6583  * dev: Pointer to struct rte_eth_dev.
6584  * filter: pointer to the filter that will be removed.
6585  */
6586 static void
6587 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6588                         struct ixgbe_5tuple_filter *filter)
6589 {
6590         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6591         struct ixgbe_filter_info *filter_info =
6592                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6593         uint16_t index = filter->index;
6594
6595         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6596                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6597         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6598         rte_free(filter);
6599
6600         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6601         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6602         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6603         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6604         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6605 }
6606
6607 static int
6608 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6609 {
6610         struct ixgbe_hw *hw;
6611         uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
6612         struct rte_eth_dev_data *dev_data = dev->data;
6613
6614         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6615
6616         if (mtu < RTE_ETHER_MIN_MTU ||
6617                         max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
6618                 return -EINVAL;
6619
6620         /* If the device is started, refuse an MTU that requires scattered
6621          * packet support when this feature has not been enabled beforehand.
6622          */
6623         if (dev_data->dev_started && !dev_data->scattered_rx &&
6624             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6625              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
6626                 PMD_INIT_LOG(ERR, "Stop port first.");
6627                 return -EINVAL;
6628         }
6629
6630         /*
6631          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6632          * request of version 2.0 of the mailbox API.
6633          * For now, use the IXGBE_VF_SET_LPE request of version 1.0
6634          * of the mailbox API.
6635          * This IXGBE_VF_SET_LPE action won't work with ixgbe PF drivers
6636          * prior to 3.11.33, which contains the following change:
6637          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6638          */
6639         ixgbevf_rlpml_set_vf(hw, max_frame);
6640
6641         /* update max frame size */
6642         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6643         return 0;
6644 }
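/*
 * Usage sketch (hypothetical application code, not part of the PMD):
 * the callback above is reached through the generic ethdev API, e.g.
 *
 *     uint16_t port_id = 0;   // assumed VF port
 *     int ret = rte_eth_dev_set_mtu(port_id, 1500);
 *     if (ret != 0)
 *         printf("set_mtu failed: %d\n", ret);
 *
 * Note the stop-port-first restriction above when the new frame size
 * would require scattered Rx that was not enabled at start time.
 */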
6645
6646 static inline struct ixgbe_5tuple_filter *
6647 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6648                         struct ixgbe_5tuple_filter_info *key)
6649 {
6650         struct ixgbe_5tuple_filter *it;
6651
6652         TAILQ_FOREACH(it, filter_list, entries) {
6653                 if (memcmp(key, &it->filter_info,
6654                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6655                         return it;
6656                 }
6657         }
6658         return NULL;
6659 }
6660
6661 /* Translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info. */
6662 static inline int
6663 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6664                         struct ixgbe_5tuple_filter_info *filter_info)
6665 {
6666         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6667                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6668                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6669                 return -EINVAL;
6670
6671         switch (filter->dst_ip_mask) {
6672         case UINT32_MAX:
6673                 filter_info->dst_ip_mask = 0;
6674                 filter_info->dst_ip = filter->dst_ip;
6675                 break;
6676         case 0:
6677                 filter_info->dst_ip_mask = 1;
6678                 break;
6679         default:
6680                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6681                 return -EINVAL;
6682         }
6683
6684         switch (filter->src_ip_mask) {
6685         case UINT32_MAX:
6686                 filter_info->src_ip_mask = 0;
6687                 filter_info->src_ip = filter->src_ip;
6688                 break;
6689         case 0:
6690                 filter_info->src_ip_mask = 1;
6691                 break;
6692         default:
6693                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6694                 return -EINVAL;
6695         }
6696
6697         switch (filter->dst_port_mask) {
6698         case UINT16_MAX:
6699                 filter_info->dst_port_mask = 0;
6700                 filter_info->dst_port = filter->dst_port;
6701                 break;
6702         case 0:
6703                 filter_info->dst_port_mask = 1;
6704                 break;
6705         default:
6706                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6707                 return -EINVAL;
6708         }
6709
6710         switch (filter->src_port_mask) {
6711         case UINT16_MAX:
6712                 filter_info->src_port_mask = 0;
6713                 filter_info->src_port = filter->src_port;
6714                 break;
6715         case 0:
6716                 filter_info->src_port_mask = 1;
6717                 break;
6718         default:
6719                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6720                 return -EINVAL;
6721         }
6722
6723         switch (filter->proto_mask) {
6724         case UINT8_MAX:
6725                 filter_info->proto_mask = 0;
6726                 filter_info->proto =
6727                         convert_protocol_type(filter->proto);
6728                 break;
6729         case 0:
6730                 filter_info->proto_mask = 1;
6731                 break;
6732         default:
6733                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6734                 return -EINVAL;
6735         }
6736
6737         filter_info->priority = (uint8_t)filter->priority;
6738         return 0;
6739 }
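/*
 * Note on mask encoding (editor's example, derived from the switches
 * above): the generic API uses UINT32_MAX/UINT16_MAX/UINT8_MAX for
 * "compare this field" and 0 for "ignore it", while the hardware-facing
 * ixgbe_5tuple_filter_info uses the inverted convention, where a mask
 * bit of 1 means the field is masked out. For instance,
 * filter->dst_ip_mask == UINT32_MAX on input becomes
 * filter_info->dst_ip_mask == 0 ("do not mask, i.e. compare dst_ip").
 */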
6740
6741 /*
6742  * add or delete an ntuple filter
6743  *
6744  * @param
6745  * dev: Pointer to struct rte_eth_dev.
6746  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6747  * add: if true, add the filter; if false, remove it
6748  *
6749  * @return
6750  *    - On success, zero.
6751  *    - On failure, a negative value.
6752  */
6753 int
6754 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6755                         struct rte_eth_ntuple_filter *ntuple_filter,
6756                         bool add)
6757 {
6758         struct ixgbe_filter_info *filter_info =
6759                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6760         struct ixgbe_5tuple_filter_info filter_5tuple;
6761         struct ixgbe_5tuple_filter *filter;
6762         int ret;
6763
6764         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6765                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6766                 return -EINVAL;
6767         }
6768
6769         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6770         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6771         if (ret < 0)
6772                 return ret;
6773
6774         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6775                                          &filter_5tuple);
6776         if (filter != NULL && add) {
6777                 PMD_DRV_LOG(ERR, "filter exists.");
6778                 return -EEXIST;
6779         }
6780         if (filter == NULL && !add) {
6781                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6782                 return -ENOENT;
6783         }
6784
6785         if (add) {
6786                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6787                                 sizeof(struct ixgbe_5tuple_filter), 0);
6788                 if (filter == NULL)
6789                         return -ENOMEM;
6790                 rte_memcpy(&filter->filter_info,
6791                                  &filter_5tuple,
6792                                  sizeof(struct ixgbe_5tuple_filter_info));
6793                 filter->queue = ntuple_filter->queue;
6794                 ret = ixgbe_add_5tuple_filter(dev, filter);
6795                 if (ret < 0) {
6796                         rte_free(filter);
6797                         return ret;
6798                 }
6799         } else
6800                 ixgbe_remove_5tuple_filter(dev, filter);
6801
6802         return 0;
6803 }
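/*
 * Usage sketch (hypothetical, assuming the legacy filter API of this
 * DPDK era): an application adds a 5-tuple rule through
 * rte_eth_dev_filter_ctrl(), which lands here via
 * ixgbe_ntuple_filter_handle() below. Network byte order for the
 * address and port fields is assumed.
 *
 *     struct rte_eth_ntuple_filter f = {
 *             .flags = RTE_5TUPLE_FLAGS,
 *             .dst_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *             .dst_ip_mask = UINT32_MAX,   // compare dst_ip
 *             .src_ip_mask = 0,            // ignore src_ip
 *             .dst_port = rte_cpu_to_be_16(80),
 *             .dst_port_mask = UINT16_MAX,
 *             .src_port_mask = 0,
 *             .proto = IPPROTO_TCP,
 *             .proto_mask = UINT8_MAX,
 *             .priority = 1,
 *             .queue = 4,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                             RTE_ETH_FILTER_ADD, &f);
 */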
6804
6805 /*
6806  * get an ntuple filter
6807  *
6808  * @param
6809  * dev: Pointer to struct rte_eth_dev.
6810  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6811  *
6812  * @return
6813  *    - On success, zero.
6814  *    - On failure, a negative value.
6815  */
6816 static int
6817 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6818                         struct rte_eth_ntuple_filter *ntuple_filter)
6819 {
6820         struct ixgbe_filter_info *filter_info =
6821                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6822         struct ixgbe_5tuple_filter_info filter_5tuple;
6823         struct ixgbe_5tuple_filter *filter;
6824         int ret;
6825
6826         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6827                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6828                 return -EINVAL;
6829         }
6830
6831         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6832         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6833         if (ret < 0)
6834                 return ret;
6835
6836         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6837                                          &filter_5tuple);
6838         if (filter == NULL) {
6839                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6840                 return -ENOENT;
6841         }
6842         ntuple_filter->queue = filter->queue;
6843         return 0;
6844 }
6845
6846 /*
6847  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6848  * @dev: pointer to rte_eth_dev structure
6849  * @filter_op: operation to be taken.
6850  * @arg: a pointer to specific structure corresponding to the filter_op
6851  *
6852  * @return
6853  *    - On success, zero.
6854  *    - On failure, a negative value.
6855  */
6856 static int
6857 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6858                                 enum rte_filter_op filter_op,
6859                                 void *arg)
6860 {
6861         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6862         int ret;
6863
6864         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6865
6866         if (filter_op == RTE_ETH_FILTER_NOP)
6867                 return 0;
6868
6869         if (arg == NULL) {
6870                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6871                             filter_op);
6872                 return -EINVAL;
6873         }
6874
6875         switch (filter_op) {
6876         case RTE_ETH_FILTER_ADD:
6877                 ret = ixgbe_add_del_ntuple_filter(dev,
6878                         (struct rte_eth_ntuple_filter *)arg,
6879                         TRUE);
6880                 break;
6881         case RTE_ETH_FILTER_DELETE:
6882                 ret = ixgbe_add_del_ntuple_filter(dev,
6883                         (struct rte_eth_ntuple_filter *)arg,
6884                         FALSE);
6885                 break;
6886         case RTE_ETH_FILTER_GET:
6887                 ret = ixgbe_get_ntuple_filter(dev,
6888                         (struct rte_eth_ntuple_filter *)arg);
6889                 break;
6890         default:
6891                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6892                 ret = -EINVAL;
6893                 break;
6894         }
6895         return ret;
6896 }
6897
6898 int
6899 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6900                         struct rte_eth_ethertype_filter *filter,
6901                         bool add)
6902 {
6903         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6904         struct ixgbe_filter_info *filter_info =
6905                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6906         uint32_t etqf = 0;
6907         uint32_t etqs = 0;
6908         int ret;
6909         struct ixgbe_ethertype_filter ethertype_filter;
6910
6911         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6912                 return -EINVAL;
6913
6914         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
6915                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
6916                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6917                         " ethertype filter.", filter->ether_type);
6918                 return -EINVAL;
6919         }
6920
6921         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6922                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6923                 return -EINVAL;
6924         }
6925         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6926                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6927                 return -EINVAL;
6928         }
6929
6930         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6931         if (ret >= 0 && add) {
6932                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6933                             filter->ether_type);
6934                 return -EEXIST;
6935         }
6936         if (ret < 0 && !add) {
6937                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6938                             filter->ether_type);
6939                 return -ENOENT;
6940         }
6941
6942         if (add) {
6943                 etqf = IXGBE_ETQF_FILTER_EN;
6944                 etqf |= (uint32_t)filter->ether_type;
6945                 etqs |= (uint32_t)((filter->queue <<
6946                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6947                                     IXGBE_ETQS_RX_QUEUE);
6948                 etqs |= IXGBE_ETQS_QUEUE_EN;
6949
6950                 ethertype_filter.ethertype = filter->ether_type;
6951                 ethertype_filter.etqf = etqf;
6952                 ethertype_filter.etqs = etqs;
6953                 ethertype_filter.conf = FALSE;
6954                 ret = ixgbe_ethertype_filter_insert(filter_info,
6955                                                     &ethertype_filter);
6956                 if (ret < 0) {
6957                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6958                         return -ENOSPC;
6959                 }
6960         } else {
6961                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6962                 if (ret < 0)
6963                         return -ENOENT;
6964         }
6965         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6966         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6967         IXGBE_WRITE_FLUSH(hw);
6968
6969         return 0;
6970 }
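/*
 * Usage sketch (hypothetical): steering a specific EtherType to a queue
 * through the legacy filter API, e.g. LLDP (0x88CC) to queue 2. IPv4 and
 * IPv6 EtherTypes are rejected above, as are the MAC-compare and drop
 * flags.
 *
 *     struct rte_eth_ethertype_filter ef = {
 *             .ether_type = 0x88CC,
 *             .flags = 0,
 *             .queue = 2,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                             RTE_ETH_FILTER_ADD, &ef);
 */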
6971
6972 static int
6973 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6974                         struct rte_eth_ethertype_filter *filter)
6975 {
6976         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6977         struct ixgbe_filter_info *filter_info =
6978                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6979         uint32_t etqf, etqs;
6980         int ret;
6981
6982         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6983         if (ret < 0) {
6984                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6985                             filter->ether_type);
6986                 return -ENOENT;
6987         }
6988
6989         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6990         if (etqf & IXGBE_ETQF_FILTER_EN) {
6991                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6992                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6993                 filter->flags = 0;
6994                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6995                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6996                 return 0;
6997         }
6998         return -ENOENT;
6999 }
7000
7001 /*
7002  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
7003  * @dev: pointer to rte_eth_dev structure
7004  * @filter_op: operation to be taken.
7005  * @arg: a pointer to specific structure corresponding to the filter_op
7006  */
7007 static int
7008 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
7009                                 enum rte_filter_op filter_op,
7010                                 void *arg)
7011 {
7012         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7013         int ret;
7014
7015         MAC_TYPE_FILTER_SUP(hw->mac.type);
7016
7017         if (filter_op == RTE_ETH_FILTER_NOP)
7018                 return 0;
7019
7020         if (arg == NULL) {
7021                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7022                             filter_op);
7023                 return -EINVAL;
7024         }
7025
7026         switch (filter_op) {
7027         case RTE_ETH_FILTER_ADD:
7028                 ret = ixgbe_add_del_ethertype_filter(dev,
7029                         (struct rte_eth_ethertype_filter *)arg,
7030                         TRUE);
7031                 break;
7032         case RTE_ETH_FILTER_DELETE:
7033                 ret = ixgbe_add_del_ethertype_filter(dev,
7034                         (struct rte_eth_ethertype_filter *)arg,
7035                         FALSE);
7036                 break;
7037         case RTE_ETH_FILTER_GET:
7038                 ret = ixgbe_get_ethertype_filter(dev,
7039                         (struct rte_eth_ethertype_filter *)arg);
7040                 break;
7041         default:
7042                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7043                 ret = -EINVAL;
7044                 break;
7045         }
7046         return ret;
7047 }
7048
7049 static int
7050 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
7051                      enum rte_filter_type filter_type,
7052                      enum rte_filter_op filter_op,
7053                      void *arg)
7054 {
7055         int ret = 0;
7056
7057         switch (filter_type) {
7058         case RTE_ETH_FILTER_NTUPLE:
7059                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
7060                 break;
7061         case RTE_ETH_FILTER_ETHERTYPE:
7062                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
7063                 break;
7064         case RTE_ETH_FILTER_SYN:
7065                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
7066                 break;
7067         case RTE_ETH_FILTER_FDIR:
7068                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
7069                 break;
7070         case RTE_ETH_FILTER_L2_TUNNEL:
7071                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
7072                 break;
7073         case RTE_ETH_FILTER_GENERIC:
7074                 if (filter_op != RTE_ETH_FILTER_GET)
7075                         return -EINVAL;
7076                 *(const void **)arg = &ixgbe_flow_ops;
7077                 break;
7078         default:
7079                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
7080                                                         filter_type);
7081                 ret = -EINVAL;
7082                 break;
7083         }
7084
7085         return ret;
7086 }
7087
7088 static u8 *
7089 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw,
7090                         u8 **mc_addr_ptr, u32 *vmdq)
7091 {
7092         u8 *mc_addr;
7093
7094         *vmdq = 0;
7095         mc_addr = *mc_addr_ptr;
7096         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
7097         return mc_addr;
7098 }
7099
7100 static int
7101 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
7102                           struct rte_ether_addr *mc_addr_set,
7103                           uint32_t nb_mc_addr)
7104 {
7105         struct ixgbe_hw *hw;
7106         u8 *mc_addr_list;
7107
7108         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7109         mc_addr_list = (u8 *)mc_addr_set;
7110         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
7111                                          ixgbe_dev_addr_list_itr, TRUE);
7112 }
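/*
 * Usage sketch (hypothetical): the iterator above simply walks a packed
 * array of struct rte_ether_addr, which is what the generic API hands in:
 *
 *     struct rte_ether_addr mc[2] = {
 *             {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }},
 *             {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }},
 *     };
 *     rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 */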
7113
7114 static uint64_t
7115 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
7116 {
7117         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7118         uint64_t systime_cycles;
7119
7120         switch (hw->mac.type) {
7121         case ixgbe_mac_X550:
7122         case ixgbe_mac_X550EM_x:
7123         case ixgbe_mac_X550EM_a:
7124                 /* SYSTIML stores ns and SYSTIMH stores seconds. */
7125                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
7126                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
7127                                 * NSEC_PER_SEC;
7128                 break;
7129         default:
7130                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
7131                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
7132                                 << 32;
7133         }
7134
7135         return systime_cycles;
7136 }
7137
7138 static uint64_t
7139 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
7140 {
7141         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7142         uint64_t rx_tstamp_cycles;
7143
7144         switch (hw->mac.type) {
7145         case ixgbe_mac_X550:
7146         case ixgbe_mac_X550EM_x:
7147         case ixgbe_mac_X550EM_a:
7148                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
7149                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
7150                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
7151                                 * NSEC_PER_SEC;
7152                 break;
7153         default:
7154                 /* RXSTMPL and RXSTMPH hold the halves of a raw 64-bit cycle count. */
7155                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
7156                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
7157                                 << 32;
7158         }
7159
7160         return rx_tstamp_cycles;
7161 }
7162
7163 static uint64_t
7164 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
7165 {
7166         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7167         uint64_t tx_tstamp_cycles;
7168
7169         switch (hw->mac.type) {
7170         case ixgbe_mac_X550:
7171         case ixgbe_mac_X550EM_x:
7172         case ixgbe_mac_X550EM_a:
7173                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
7174                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
7175                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
7176                                 * NSEC_PER_SEC;
7177                 break;
7178         default:
7179                 /* TXSTMPL and TXSTMPH hold the halves of a raw 64-bit cycle count. */
7180                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
7181                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
7182                                 << 32;
7183         }
7184
7185         return tx_tstamp_cycles;
7186 }
7187
7188 static void
7189 ixgbe_start_timecounters(struct rte_eth_dev *dev)
7190 {
7191         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7192         struct ixgbe_adapter *adapter = dev->data->dev_private;
7193         struct rte_eth_link link;
7194         uint32_t incval = 0;
7195         uint32_t shift = 0;
7196
7197         /* Get current link speed. */
7198         ixgbe_dev_link_update(dev, 1);
7199         rte_eth_linkstatus_get(dev, &link);
7200
7201         switch (link.link_speed) {
7202         case ETH_SPEED_NUM_100M:
7203                 incval = IXGBE_INCVAL_100;
7204                 shift = IXGBE_INCVAL_SHIFT_100;
7205                 break;
7206         case ETH_SPEED_NUM_1G:
7207                 incval = IXGBE_INCVAL_1GB;
7208                 shift = IXGBE_INCVAL_SHIFT_1GB;
7209                 break;
7210         case ETH_SPEED_NUM_10G:
7211         default:
7212                 incval = IXGBE_INCVAL_10GB;
7213                 shift = IXGBE_INCVAL_SHIFT_10GB;
7214                 break;
7215         }
7216
7217         switch (hw->mac.type) {
7218         case ixgbe_mac_X550:
7219         case ixgbe_mac_X550EM_x:
7220         case ixgbe_mac_X550EM_a:
7221                 /* Independent of link speed. */
7222                 incval = 1;
7223                 /* Cycles read will be interpreted as ns. */
7224                 shift = 0;
7225                 /* Fall-through */
7226         case ixgbe_mac_X540:
7227                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
7228                 break;
7229         case ixgbe_mac_82599EB:
7230                 incval >>= IXGBE_INCVAL_SHIFT_82599;
7231                 shift -= IXGBE_INCVAL_SHIFT_82599;
7232                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
7233                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
7234                 break;
7235         default:
7236                 /* Not supported. */
7237                 return;
7238         }
7239
7240         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
7241         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7242         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7243
7244         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7245         adapter->systime_tc.cc_shift = shift;
7246         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
7247
7248         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7249         adapter->rx_tstamp_tc.cc_shift = shift;
7250         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7251
7252         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7253         adapter->tx_tstamp_tc.cc_shift = shift;
7254         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7255 }
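/*
 * Worked example (editor's note, values from the defines above): at 10G
 * the SYSTIM clock period is 6.4 ns, and IXGBE_INCVAL_10GB (0x66666666)
 * is 6.4 * 2^28 with IXGBE_INCVAL_SHIFT_10GB == 28. The raw counter thus
 * advances by ~6.4 * 2^28 per tick, and rte_timecounter_update()
 * recovers nanoseconds by shifting the cycle delta right by cc_shift
 * (accumulating the fractional part via nsec_mask). On X550 the counter
 * already holds ns/seconds directly, hence incval = 1 and shift = 0.
 */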
7256
7257 static int
7258 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
7259 {
7260         struct ixgbe_adapter *adapter = dev->data->dev_private;
7261
7262         adapter->systime_tc.nsec += delta;
7263         adapter->rx_tstamp_tc.nsec += delta;
7264         adapter->tx_tstamp_tc.nsec += delta;
7265
7266         return 0;
7267 }
7268
7269 static int
7270 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
7271 {
7272         uint64_t ns;
7273         struct ixgbe_adapter *adapter = dev->data->dev_private;
7274
7275         ns = rte_timespec_to_ns(ts);
7276         /* Set the timecounters to a new value. */
7277         adapter->systime_tc.nsec = ns;
7278         adapter->rx_tstamp_tc.nsec = ns;
7279         adapter->tx_tstamp_tc.nsec = ns;
7280
7281         return 0;
7282 }
7283
7284 static int
7285 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
7286 {
7287         uint64_t ns, systime_cycles;
7288         struct ixgbe_adapter *adapter = dev->data->dev_private;
7289
7290         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
7291         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
7292         *ts = rte_ns_to_timespec(ns);
7293
7294         return 0;
7295 }
7296
7297 static int
7298 ixgbe_timesync_enable(struct rte_eth_dev *dev)
7299 {
7300         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7301         uint32_t tsync_ctl;
7302         uint32_t tsauxc;
7303
7304         /* Stop the timesync system time. */
7305         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
7306         /* Reset the timesync system time value. */
7307         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
7308         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
7309
7310         /* Enable system time for platforms where it isn't on by default. */
7311         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
7312         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
7313         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
7314
7315         ixgbe_start_timecounters(dev);
7316
7317         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7318         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
7319                         (RTE_ETHER_TYPE_1588 |
7320                          IXGBE_ETQF_FILTER_EN |
7321                          IXGBE_ETQF_1588));
7322
7323         /* Enable timestamping of received PTP packets. */
7324         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7325         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
7326         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7327
7328         /* Enable timestamping of transmitted PTP packets. */
7329         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7330         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
7331         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7332
7333         IXGBE_WRITE_FLUSH(hw);
7334
7335         return 0;
7336 }
7337
7338 static int
7339 ixgbe_timesync_disable(struct rte_eth_dev *dev)
7340 {
7341         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7342         uint32_t tsync_ctl;
7343
7344         /* Disable timestamping of transmitted PTP packets. */
7345         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7346         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
7347         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7348
7349         /* Disable timestamping of received PTP packets. */
7350         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7351         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
7352         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7353
7354         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7355         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
7356
7357         /* Stop incrementing the System Time registers. */
7358         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
7359
7360         return 0;
7361 }
7362
7363 static int
7364 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
7365                                  struct timespec *timestamp,
7366                                  uint32_t flags __rte_unused)
7367 {
7368         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7369         struct ixgbe_adapter *adapter = dev->data->dev_private;
7370         uint32_t tsync_rxctl;
7371         uint64_t rx_tstamp_cycles;
7372         uint64_t ns;
7373
7374         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7375         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
7376                 return -EINVAL;
7377
7378         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
7379         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
7380         *timestamp = rte_ns_to_timespec(ns);
7381
7382         return  0;
7383 }
7384
7385 static int
7386 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7387                                  struct timespec *timestamp)
7388 {
7389         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7390         struct ixgbe_adapter *adapter = dev->data->dev_private;
7391         uint32_t tsync_txctl;
7392         uint64_t tx_tstamp_cycles;
7393         uint64_t ns;
7394
7395         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7396         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7397                 return -EINVAL;
7398
7399         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7400         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7401         *timestamp = rte_ns_to_timespec(ns);
7402
7403         return 0;
7404 }
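/*
 * Usage sketch (hypothetical application flow for the timesync group
 * above, using the generic ethdev API):
 *
 *     struct timespec ts;
 *
 *     rte_eth_timesync_enable(port_id);
 *     // ... receive a PTP frame matched by the 1588 EtherType filter ...
 *     if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *         printf("rx tstamp: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *     // slew the local clock by, e.g., a measured offset of -1000 ns
 *     rte_eth_timesync_adjust_time(port_id, -1000);
 *     rte_eth_timesync_disable(port_id);
 *
 * -EINVAL from the read calls above means no valid timestamp is latched.
 */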
7405
7406 static int
7407 ixgbe_get_reg_length(struct rte_eth_dev *dev)
7408 {
7409         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7410         int count = 0;
7411         int g_ind = 0;
7412         const struct reg_info *reg_group;
7413         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7414                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7415
7416         while ((reg_group = reg_set[g_ind++]))
7417                 count += ixgbe_regs_group_count(reg_group);
7418
7419         return count;
7420 }
7421
7422 static int
7423 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7424 {
7425         int count = 0;
7426         int g_ind = 0;
7427         const struct reg_info *reg_group;
7428
7429         while ((reg_group = ixgbevf_regs[g_ind++]))
7430                 count += ixgbe_regs_group_count(reg_group);
7431
7432         return count;
7433 }
7434
7435 static int
7436 ixgbe_get_regs(struct rte_eth_dev *dev,
7437               struct rte_dev_reg_info *regs)
7438 {
7439         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7440         uint32_t *data = regs->data;
7441         int g_ind = 0;
7442         int count = 0;
7443         const struct reg_info *reg_group;
7444         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7445                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7446
7447         if (data == NULL) {
7448                 regs->length = ixgbe_get_reg_length(dev);
7449                 regs->width = sizeof(uint32_t);
7450                 return 0;
7451         }
7452
7453         /* Support only full register dump */
7454         if ((regs->length == 0) ||
7455             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
7456                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7457                         hw->device_id;
7458                 while ((reg_group = reg_set[g_ind++]))
7459                         count += ixgbe_read_regs_group(dev, &data[count],
7460                                 reg_group);
7461                 return 0;
7462         }
7463
7464         return -ENOTSUP;
7465 }
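/*
 * Usage sketch (hypothetical): like most register-dump paths, this is a
 * two-call API; the first call with data == NULL only reports the size.
 * Only a full dump is supported, per the check above.
 *
 *     struct rte_dev_reg_info info = { .data = NULL };
 *
 *     rte_eth_dev_get_reg_info(port_id, &info);
 *     info.data = malloc(info.length * info.width);
 *     rte_eth_dev_get_reg_info(port_id, &info);
 */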
7466
7467 static int
7468 ixgbevf_get_regs(struct rte_eth_dev *dev,
7469                 struct rte_dev_reg_info *regs)
7470 {
7471         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7472         uint32_t *data = regs->data;
7473         int g_ind = 0;
7474         int count = 0;
7475         const struct reg_info *reg_group;
7476
7477         if (data == NULL) {
7478                 regs->length = ixgbevf_get_reg_length(dev);
7479                 regs->width = sizeof(uint32_t);
7480                 return 0;
7481         }
7482
7483         /* Support only full register dump */
7484         if ((regs->length == 0) ||
7485             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7486                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7487                         hw->device_id;
7488                 while ((reg_group = ixgbevf_regs[g_ind++]))
7489                         count += ixgbe_read_regs_group(dev, &data[count],
7490                                                       reg_group);
7491                 return 0;
7492         }
7493
7494         return -ENOTSUP;
7495 }
7496
7497 static int
7498 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7499 {
7500         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7501
7502         /* Return unit is byte count */
7503         return hw->eeprom.word_size * 2;
7504 }
7505
7506 static int
7507 ixgbe_get_eeprom(struct rte_eth_dev *dev,
7508                 struct rte_dev_eeprom_info *in_eeprom)
7509 {
7510         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7511         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7512         uint16_t *data = in_eeprom->data;
7513         int first, length;
7514
7515         first = in_eeprom->offset >> 1;
7516         length = in_eeprom->length >> 1;
7517         if ((first > hw->eeprom.word_size) ||
7518             ((first + length) > hw->eeprom.word_size))
7519                 return -EINVAL;
7520
7521         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7522
7523         return eeprom->ops.read_buffer(hw, first, length, data);
7524 }
7525
7526 static int
7527 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7528                 struct rte_dev_eeprom_info *in_eeprom)
7529 {
7530         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7531         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7532         uint16_t *data = in_eeprom->data;
7533         int first, length;
7534
7535         first = in_eeprom->offset >> 1;
7536         length = in_eeprom->length >> 1;
7537         if ((first > hw->eeprom.word_size) ||
7538             ((first + length) > hw->eeprom.word_size))
7539                 return -EINVAL;
7540
7541         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7542
7543         return eeprom->ops.write_buffer(hw, first, length, data);
7544 }
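/*
 * Usage sketch (hypothetical): offsets and lengths are byte based at the
 * API level and halved into 16-bit EEPROM words above, so they should be
 * even.
 *
 *     struct rte_dev_eeprom_info ee = { 0 };
 *     uint16_t buf[8];
 *
 *     ee.offset = 0;
 *     ee.length = sizeof(buf);   // 16 bytes == 8 EEPROM words
 *     ee.data = buf;
 *     rte_eth_dev_get_eeprom(port_id, &ee);
 */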
7545
7546 static int
7547 ixgbe_get_module_info(struct rte_eth_dev *dev,
7548                       struct rte_eth_dev_module_info *modinfo)
7549 {
7550         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7551         uint32_t status;
7552         uint8_t sff8472_rev, addr_mode;
7553         bool page_swap = false;
7554
7555         /* Check whether we support SFF-8472 or not */
7556         status = hw->phy.ops.read_i2c_eeprom(hw,
7557                                              IXGBE_SFF_SFF_8472_COMP,
7558                                              &sff8472_rev);
7559         if (status != 0)
7560                 return -EIO;
7561
7562         /* Read the addressing mode; a required page swap is not supported. */
7563         status = hw->phy.ops.read_i2c_eeprom(hw,
7564                                              IXGBE_SFF_SFF_8472_SWAP,
7565                                              &addr_mode);
7566         if (status != 0)
7567                 return -EIO;
7568
7569         if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
7570                 PMD_DRV_LOG(ERR,
7571                             "Address change required to access page 0xA2, "
7572                             "but not supported. Please report the module "
7573                             "type to the driver maintainers.");
7574                 page_swap = true;
7575         }
7576
7577         if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
7578                 /* We have an SFP, but it does not support SFF-8472 */
7579                 modinfo->type = RTE_ETH_MODULE_SFF_8079;
7580                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
7581         } else {
7582                 /* We have an SFP that supports a revision of SFF-8472. */
7583                 modinfo->type = RTE_ETH_MODULE_SFF_8472;
7584                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
7585         }
7586
7587         return 0;
7588 }
7589
7590 static int
7591 ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
7592                         struct rte_dev_eeprom_info *info)
7593 {
7594         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7595         uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
7596         uint8_t databyte = 0xFF;
7597         uint8_t *data = info->data;
7598         uint32_t i = 0;
7599
7600         if (info->length == 0)
7601                 return -EINVAL;
7602
7603         for (i = info->offset; i < info->offset + info->length; i++) {
7604                 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
7605                         status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
7606                 else
7607                         status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
7608
7609                 if (status != 0)
7610                         return -EIO;
7611
7612                 data[i - info->offset] = databyte;
7613         }
7614
7615         return 0;
7616 }
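/*
 * Usage sketch (hypothetical): query the module type first, then size
 * the EEPROM read accordingly.
 *
 *     struct rte_eth_dev_module_info mi;
 *     struct rte_dev_eeprom_info ee = { 0 };
 *     uint8_t buf[RTE_ETH_MODULE_SFF_8472_LEN];
 *
 *     rte_eth_dev_get_module_info(port_id, &mi);
 *     ee.length = mi.eeprom_len;
 *     ee.data = buf;
 *     rte_eth_dev_get_module_eeprom(port_id, &ee);
 */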
7617
7618 uint16_t
7619 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7620         switch (mac_type) {
7621         case ixgbe_mac_X550:
7622         case ixgbe_mac_X550EM_x:
7623         case ixgbe_mac_X550EM_a:
7624                 return ETH_RSS_RETA_SIZE_512;
7625         case ixgbe_mac_X550_vf:
7626         case ixgbe_mac_X550EM_x_vf:
7627         case ixgbe_mac_X550EM_a_vf:
7628                 return ETH_RSS_RETA_SIZE_64;
7629         case ixgbe_mac_X540_vf:
7630         case ixgbe_mac_82599_vf:
7631                 return 0;
7632         default:
7633                 return ETH_RSS_RETA_SIZE_128;
7634         }
7635 }
7636
7637 uint32_t
7638 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
7639         switch (mac_type) {
7640         case ixgbe_mac_X550:
7641         case ixgbe_mac_X550EM_x:
7642         case ixgbe_mac_X550EM_a:
7643                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7644                         return IXGBE_RETA(reta_idx >> 2);
7645                 else
7646                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7647         case ixgbe_mac_X550_vf:
7648         case ixgbe_mac_X550EM_x_vf:
7649         case ixgbe_mac_X550EM_a_vf:
7650                 return IXGBE_VFRETA(reta_idx >> 2);
7651         default:
7652                 return IXGBE_RETA(reta_idx >> 2);
7653         }
7654 }
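/*
 * Worked example (editor's note): four RETA entries are packed per
 * 32-bit register, hence the ">> 2". On X550, reta_idx 100 maps to
 * IXGBE_RETA(25), while reta_idx 200 (beyond the first 128 entries)
 * maps to the extended table at IXGBE_ERETA((200 - 128) >> 2), i.e.
 * IXGBE_ERETA(18).
 */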
7655
7656 uint32_t
7657 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7658         switch (mac_type) {
7659         case ixgbe_mac_X550_vf:
7660         case ixgbe_mac_X550EM_x_vf:
7661         case ixgbe_mac_X550EM_a_vf:
7662                 return IXGBE_VFMRQC;
7663         default:
7664                 return IXGBE_MRQC;
7665         }
7666 }
7667
7668 uint32_t
7669 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7670         switch (mac_type) {
7671         case ixgbe_mac_X550_vf:
7672         case ixgbe_mac_X550EM_x_vf:
7673         case ixgbe_mac_X550EM_a_vf:
7674                 return IXGBE_VFRSSRK(i);
7675         default:
7676                 return IXGBE_RSSRK(i);
7677         }
7678 }
7679
7680 bool
7681 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7682         switch (mac_type) {
7683         case ixgbe_mac_82599_vf:
7684         case ixgbe_mac_X540_vf:
7685                 return 0;
7686         default:
7687                 return 1;
7688         }
7689 }
7690
7691 static int
7692 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7693                         struct rte_eth_dcb_info *dcb_info)
7694 {
7695         struct ixgbe_dcb_config *dcb_config =
7696                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7697         struct ixgbe_dcb_tc_config *tc;
7698         struct rte_eth_dcb_tc_queue_mapping *tc_queue;
7699         uint8_t nb_tcs;
7700         uint8_t i, j;
7701
7702         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7703                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7704         else
7705                 dcb_info->nb_tcs = 1;
7706
7707         tc_queue = &dcb_info->tc_queue;
7708         nb_tcs = dcb_info->nb_tcs;
7709
7710         if (dcb_config->vt_mode) { /* vt is enabled */
7711                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7712                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7713                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7714                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7715                 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
7716                         for (j = 0; j < nb_tcs; j++) {
7717                                 tc_queue->tc_rxq[0][j].base = j;
7718                                 tc_queue->tc_rxq[0][j].nb_queue = 1;
7719                                 tc_queue->tc_txq[0][j].base = j;
7720                                 tc_queue->tc_txq[0][j].nb_queue = 1;
7721                         }
7722                 } else {
7723                         for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7724                                 for (j = 0; j < nb_tcs; j++) {
7725                                         tc_queue->tc_rxq[i][j].base =
7726                                                 i * nb_tcs + j;
7727                                         tc_queue->tc_rxq[i][j].nb_queue = 1;
7728                                         tc_queue->tc_txq[i][j].base =
7729                                                 i * nb_tcs + j;
7730                                         tc_queue->tc_txq[i][j].nb_queue = 1;
7731                                 }
7732                         }
7733                 }
7734         } else { /* vt is disabled */
7735                 struct rte_eth_dcb_rx_conf *rx_conf =
7736                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7737                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7738                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7739                 if (dcb_info->nb_tcs == ETH_4_TCS) {
7740                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7741                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7742                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7743                         }
7744                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7745                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7746                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7747                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7748                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7749                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7750                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7751                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7752                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7753                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7754                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7755                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7756                         }
7757                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7758                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7759                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7760                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7761                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7762                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7763                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7764                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7765                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7766                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7767                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7768                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7769                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7770                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7771                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7772                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7773                 }
7774         }
7775         for (i = 0; i < dcb_info->nb_tcs; i++) {
7776                 tc = &dcb_config->tc_config[i];
7777                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7778         }
7779         return 0;
7780 }
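/*
 * Usage sketch (hypothetical): applications read the TC/queue mapping
 * built above through the generic API.
 *
 *     struct rte_eth_dcb_info di;
 *
 *     if (rte_eth_dev_get_dcb_info(port_id, &di) == 0)
 *         printf("%u TCs, TC0 rxq base %u\n", di.nb_tcs,
 *                di.tc_queue.tc_rxq[0][0].base);
 */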
7781
7782 /* Update e-tag ether type */
7783 static int
7784 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7785                             uint16_t ether_type)
7786 {
7787         uint32_t etag_etype;
7788
7789         if (hw->mac.type != ixgbe_mac_X550 &&
7790             hw->mac.type != ixgbe_mac_X550EM_x &&
7791             hw->mac.type != ixgbe_mac_X550EM_a) {
7792                 return -ENOTSUP;
7793         }
7794
7795         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7796         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7797         etag_etype |= ether_type;
7798         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7799         IXGBE_WRITE_FLUSH(hw);
7800
7801         return 0;
7802 }
7803
7804 /* Config l2 tunnel ether type */
7805 static int
7806 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7807                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7808 {
7809         int ret = 0;
7810         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7811         struct ixgbe_l2_tn_info *l2_tn_info =
7812                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7813
7814         if (l2_tunnel == NULL)
7815                 return -EINVAL;
7816
7817         switch (l2_tunnel->l2_tunnel_type) {
7818         case RTE_L2_TUNNEL_TYPE_E_TAG:
7819                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7820                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7821                 break;
7822         default:
7823                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7824                 ret = -EINVAL;
7825                 break;
7826         }
7827
7828         return ret;
7829 }
7830
7831 /* Enable e-tag tunnel */
7832 static int
7833 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7834 {
7835         uint32_t etag_etype;
7836
7837         if (hw->mac.type != ixgbe_mac_X550 &&
7838             hw->mac.type != ixgbe_mac_X550EM_x &&
7839             hw->mac.type != ixgbe_mac_X550EM_a) {
7840                 return -ENOTSUP;
7841         }
7842
7843         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7844         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7845         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7846         IXGBE_WRITE_FLUSH(hw);
7847
7848         return 0;
7849 }
7850
7851 /* Enable l2 tunnel */
7852 static int
7853 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7854                            enum rte_eth_tunnel_type l2_tunnel_type)
7855 {
7856         int ret = 0;
7857         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7858         struct ixgbe_l2_tn_info *l2_tn_info =
7859                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7860
7861         switch (l2_tunnel_type) {
7862         case RTE_L2_TUNNEL_TYPE_E_TAG:
7863                 l2_tn_info->e_tag_en = TRUE;
7864                 ret = ixgbe_e_tag_enable(hw);
7865                 break;
7866         default:
7867                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7868                 ret = -EINVAL;
7869                 break;
7870         }
7871
7872         return ret;
7873 }
7874
7875 /* Disable e-tag tunnel */
7876 static int
7877 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7878 {
7879         uint32_t etag_etype;
7880
7881         if (hw->mac.type != ixgbe_mac_X550 &&
7882             hw->mac.type != ixgbe_mac_X550EM_x &&
7883             hw->mac.type != ixgbe_mac_X550EM_a) {
7884                 return -ENOTSUP;
7885         }
7886
7887         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7888         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7889         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7890         IXGBE_WRITE_FLUSH(hw);
7891
7892         return 0;
7893 }
7894
7895 /* Disable l2 tunnel */
7896 static int
7897 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7898                             enum rte_eth_tunnel_type l2_tunnel_type)
7899 {
7900         int ret = 0;
7901         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7902         struct ixgbe_l2_tn_info *l2_tn_info =
7903                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7904
7905         switch (l2_tunnel_type) {
7906         case RTE_L2_TUNNEL_TYPE_E_TAG:
7907                 l2_tn_info->e_tag_en = FALSE;
7908                 ret = ixgbe_e_tag_disable(hw);
7909                 break;
7910         default:
7911                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7912                 ret = -EINVAL;
7913                 break;
7914         }
7915
7916         return ret;
7917 }
7918
7919 static int
7920 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7921                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7922 {
7923         int ret = 0;
7924         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7925         uint32_t i, rar_entries;
7926         uint32_t rar_low, rar_high;
7927
7928         if (hw->mac.type != ixgbe_mac_X550 &&
7929             hw->mac.type != ixgbe_mac_X550EM_x &&
7930             hw->mac.type != ixgbe_mac_X550EM_a) {
7931                 return -ENOTSUP;
7932         }
7933
7934         rar_entries = ixgbe_get_num_rx_addrs(hw);
7935
7936         for (i = 1; i < rar_entries; i++) {
7937                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7938                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7939                 if ((rar_high & IXGBE_RAH_AV) &&
7940                     (rar_high & IXGBE_RAH_ADTYPE) &&
7941                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7942                      l2_tunnel->tunnel_id)) {
7943                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7944                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7945
7946                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7947
7948                         return ret;
7949                 }
7950         }
7951
7952         return ret;
7953 }
7954
7955 static int
7956 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7957                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7958 {
7959         int ret = 0;
7960         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7961         uint32_t i, rar_entries;
7962         uint32_t rar_low, rar_high;
7963
7964         if (hw->mac.type != ixgbe_mac_X550 &&
7965             hw->mac.type != ixgbe_mac_X550EM_x &&
7966             hw->mac.type != ixgbe_mac_X550EM_a) {
7967                 return -ENOTSUP;
7968         }
7969
7970         /* One entry per tunnel. Try to remove a potentially existing entry first. */
7971         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7972
7973         rar_entries = ixgbe_get_num_rx_addrs(hw);
7974
7975         for (i = 1; i < rar_entries; i++) {
7976                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7977                 if (rar_high & IXGBE_RAH_AV) {
7978                         continue;
7979                 } else {
7980                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7981                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7982                         rar_low = l2_tunnel->tunnel_id;
7983
7984                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7985                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7986
7987                         return ret;
7988                 }
7989         }
7990
7991         PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
7992                      " Please remove a rule before adding a new one.");
7993         return -EINVAL;
7994 }
7995
7996 static inline struct ixgbe_l2_tn_filter *
7997 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7998                           struct ixgbe_l2_tn_key *key)
7999 {
8000         int ret;
8001
8002         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
8003         if (ret < 0)
8004                 return NULL;
8005
8006         return l2_tn_info->hash_map[ret];
8007 }
8008
8009 static inline int
8010 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
8011                           struct ixgbe_l2_tn_filter *l2_tn_filter)
8012 {
8013         int ret;
8014
8015         ret = rte_hash_add_key(l2_tn_info->hash_handle,
8016                                &l2_tn_filter->key);
8017
8018         if (ret < 0) {
8019                 PMD_DRV_LOG(ERR,
8020                             "Failed to insert L2 tunnel filter"
8021                             " into hash table: %d!",
8022                             ret);
8023                 return ret;
8024         }
8025
8026         l2_tn_info->hash_map[ret] = l2_tn_filter;
8027
8028         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
8029
8030         return 0;
8031 }
8032
8033 static inline int
8034 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
8035                           struct ixgbe_l2_tn_key *key)
8036 {
8037         int ret;
8038         struct ixgbe_l2_tn_filter *l2_tn_filter;
8039
8040         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
8041
8042         if (ret < 0) {
8043                 PMD_DRV_LOG(ERR,
8044                             "No such L2 tunnel filter to delete %d!",
8045                             ret);
8046                 return ret;
8047         }
8048
8049         l2_tn_filter = l2_tn_info->hash_map[ret];
8050         l2_tn_info->hash_map[ret] = NULL;
8051
8052         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
8053         rte_free(l2_tn_filter);
8054
8055         return 0;
8056 }
8057
8058 /* Add l2 tunnel filter */
8059 int
8060 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
8061                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
8062                                bool restore)
8063 {
8064         int ret;
8065         struct ixgbe_l2_tn_info *l2_tn_info =
8066                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8067         struct ixgbe_l2_tn_key key;
8068         struct ixgbe_l2_tn_filter *node;
8069
8070         if (!restore) {
8071                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
8072                 key.tn_id = l2_tunnel->tunnel_id;
8073
8074                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
8075
8076                 if (node) {
8077                         PMD_DRV_LOG(ERR,
8078                                     "The L2 tunnel filter already exists!");
8079                         return -EINVAL;
8080                 }
8081
8082                 node = rte_zmalloc("ixgbe_l2_tn",
8083                                    sizeof(struct ixgbe_l2_tn_filter),
8084                                    0);
8085                 if (!node)
8086                         return -ENOMEM;
8087
8088                 rte_memcpy(&node->key,
8089                            &key,
8090                            sizeof(struct ixgbe_l2_tn_key));
8091                 node->pool = l2_tunnel->pool;
8092                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
8093                 if (ret < 0) {
8094                         rte_free(node);
8095                         return ret;
8096                 }
8097         }
8098
8099         switch (l2_tunnel->l2_tunnel_type) {
8100         case RTE_L2_TUNNEL_TYPE_E_TAG:
8101                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
8102                 break;
8103         default:
8104                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8105                 ret = -EINVAL;
8106                 break;
8107         }
8108
8109         if ((!restore) && (ret < 0))
8110                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
8111
8112         return ret;
8113 }
8114
8115 /* Delete l2 tunnel filter */
8116 int
8117 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
8118                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
8119 {
8120         int ret;
8121         struct ixgbe_l2_tn_info *l2_tn_info =
8122                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8123         struct ixgbe_l2_tn_key key;
8124
8125         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
8126         key.tn_id = l2_tunnel->tunnel_id;
8127         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
8128         if (ret < 0)
8129                 return ret;
8130
8131         switch (l2_tunnel->l2_tunnel_type) {
8132         case RTE_L2_TUNNEL_TYPE_E_TAG:
8133                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
8134                 break;
8135         default:
8136                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8137                 ret = -EINVAL;
8138                 break;
8139         }
8140
8141         return ret;
8142 }
8143
8144 /**
8145  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
8146  * @dev: pointer to rte_eth_dev structure
8147  * @filter_op: the operation to be taken
8148  * @arg: a pointer to the structure corresponding to the filter_op
8149  */
8150 static int
8151 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
8152                                   enum rte_filter_op filter_op,
8153                                   void *arg)
8154 {
8155         int ret;
8156
8157         if (filter_op == RTE_ETH_FILTER_NOP)
8158                 return 0;
8159
8160         if (arg == NULL) {
8161                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
8162                             filter_op);
8163                 return -EINVAL;
8164         }
8165
8166         switch (filter_op) {
8167         case RTE_ETH_FILTER_ADD:
8168                 ret = ixgbe_dev_l2_tunnel_filter_add
8169                         (dev,
8170                          (struct rte_eth_l2_tunnel_conf *)arg,
8171                          FALSE);
8172                 break;
8173         case RTE_ETH_FILTER_DELETE:
8174                 ret = ixgbe_dev_l2_tunnel_filter_del
8175                         (dev,
8176                          (struct rte_eth_l2_tunnel_conf *)arg);
8177                 break;
8178         default:
8179                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
8180                 ret = -EINVAL;
8181                 break;
8182         }
8183         return ret;
8184 }
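
/*
 * Illustrative sketch (not part of the driver, compiled out): an
 * application reaches the handler above through the generic
 * filter-control API. The port_id, E-tag ID and pool values below are
 * placeholders; the port is assumed to be configured already.
 */
#if 0
#include <rte_ethdev.h>

static int
example_add_e_tag_filter(uint16_t port_id)
{
	struct rte_eth_l2_tunnel_conf conf = {
		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
		.tunnel_id = 0x123,	/* placeholder GRP + E-CID */
		.pool = 1,		/* placeholder destination pool */
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_L2_TUNNEL,
				       RTE_ETH_FILTER_ADD, &conf);
}
#endif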
8185
8186 static int
8187 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
8188 {
8189         int ret = 0;
8190         uint32_t ctrl;
8191         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8192
8193         if (hw->mac.type != ixgbe_mac_X550 &&
8194             hw->mac.type != ixgbe_mac_X550EM_x &&
8195             hw->mac.type != ixgbe_mac_X550EM_a) {
8196                 return -ENOTSUP;
8197         }
8198
8199         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
8200         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
8201         if (en)
8202                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
8203         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
8204
8205         return ret;
8206 }
8207
8208 /* Enable l2 tunnel forwarding */
8209 static int
8210 ixgbe_dev_l2_tunnel_forwarding_enable
8211         (struct rte_eth_dev *dev,
8212          enum rte_eth_tunnel_type l2_tunnel_type)
8213 {
8214         struct ixgbe_l2_tn_info *l2_tn_info =
8215                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8216         int ret = 0;
8217
8218         switch (l2_tunnel_type) {
8219         case RTE_L2_TUNNEL_TYPE_E_TAG:
8220                 l2_tn_info->e_tag_fwd_en = TRUE;
8221                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
8222                 break;
8223         default:
8224                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8225                 ret = -EINVAL;
8226                 break;
8227         }
8228
8229         return ret;
8230 }
8231
8232 /* Disable l2 tunnel forwarding */
8233 static int
8234 ixgbe_dev_l2_tunnel_forwarding_disable
8235         (struct rte_eth_dev *dev,
8236          enum rte_eth_tunnel_type l2_tunnel_type)
8237 {
8238         struct ixgbe_l2_tn_info *l2_tn_info =
8239                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8240         int ret = 0;
8241
8242         switch (l2_tunnel_type) {
8243         case RTE_L2_TUNNEL_TYPE_E_TAG:
8244                 l2_tn_info->e_tag_fwd_en = FALSE;
8245                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
8246                 break;
8247         default:
8248                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8249                 ret = -EINVAL;
8250                 break;
8251         }
8252
8253         return ret;
8254 }
8255
8256 static int
8257 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
8258                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
8259                              bool en)
8260 {
8261         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
8262         int ret = 0;
8263         uint32_t vmtir, vmvir;
8264         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8265
8266         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
8267                 PMD_DRV_LOG(ERR,
8268                             "VF id %u should be less than %u",
8269                             l2_tunnel->vf_id,
8270                             pci_dev->max_vfs);
8271                 return -EINVAL;
8272         }
8273
8274         if (hw->mac.type != ixgbe_mac_X550 &&
8275             hw->mac.type != ixgbe_mac_X550EM_x &&
8276             hw->mac.type != ixgbe_mac_X550EM_a) {
8277                 return -ENOTSUP;
8278         }
8279
8280         if (en)
8281                 vmtir = l2_tunnel->tunnel_id;
8282         else
8283                 vmtir = 0;
8284
8285         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
8286
8287         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
8288         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
8289         if (en)
8290                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
8291         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
8292
8293         return ret;
8294 }
8295
8296 /* Enable l2 tunnel tag insertion */
8297 static int
8298 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
8299                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
8300 {
8301         int ret = 0;
8302
8303         switch (l2_tunnel->l2_tunnel_type) {
8304         case RTE_L2_TUNNEL_TYPE_E_TAG:
8305                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
8306                 break;
8307         default:
8308                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8309                 ret = -EINVAL;
8310                 break;
8311         }
8312
8313         return ret;
8314 }
8315
8316 /* Disable l2 tunnel tag insertion */
8317 static int
8318 ixgbe_dev_l2_tunnel_insertion_disable
8319         (struct rte_eth_dev *dev,
8320          struct rte_eth_l2_tunnel_conf *l2_tunnel)
8321 {
8322         int ret = 0;
8323
8324         switch (l2_tunnel->l2_tunnel_type) {
8325         case RTE_L2_TUNNEL_TYPE_E_TAG:
8326                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
8327                 break;
8328         default:
8329                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8330                 ret = -EINVAL;
8331                 break;
8332         }
8333
8334         return ret;
8335 }
8336
8337 static int
8338 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
8339                              bool en)
8340 {
8341         int ret = 0;
8342         uint32_t qde;
8343         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8344
8345         if (hw->mac.type != ixgbe_mac_X550 &&
8346             hw->mac.type != ixgbe_mac_X550EM_x &&
8347             hw->mac.type != ixgbe_mac_X550EM_a) {
8348                 return -ENOTSUP;
8349         }
8350
8351         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
8352         if (en)
8353                 qde |= IXGBE_QDE_STRIP_TAG;
8354         else
8355                 qde &= ~IXGBE_QDE_STRIP_TAG;
8356         qde &= ~IXGBE_QDE_READ;
8357         qde |= IXGBE_QDE_WRITE;
8358         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
8359
8360         return ret;
8361 }
8362
8363 /* Enable l2 tunnel tag stripping */
8364 static int
8365 ixgbe_dev_l2_tunnel_stripping_enable
8366         (struct rte_eth_dev *dev,
8367          enum rte_eth_tunnel_type l2_tunnel_type)
8368 {
8369         int ret = 0;
8370
8371         switch (l2_tunnel_type) {
8372         case RTE_L2_TUNNEL_TYPE_E_TAG:
8373                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
8374                 break;
8375         default:
8376                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8377                 ret = -EINVAL;
8378                 break;
8379         }
8380
8381         return ret;
8382 }
8383
8384 /* Disable l2 tunnel tag stripping */
8385 static int
8386 ixgbe_dev_l2_tunnel_stripping_disable
8387         (struct rte_eth_dev *dev,
8388          enum rte_eth_tunnel_type l2_tunnel_type)
8389 {
8390         int ret = 0;
8391
8392         switch (l2_tunnel_type) {
8393         case RTE_L2_TUNNEL_TYPE_E_TAG:
8394                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
8395                 break;
8396         default:
8397                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8398                 ret = -EINVAL;
8399                 break;
8400         }
8401
8402         return ret;
8403 }
8404
8405 /* Enable/disable l2 tunnel offload functions */
8406 static int
8407 ixgbe_dev_l2_tunnel_offload_set
8408         (struct rte_eth_dev *dev,
8409          struct rte_eth_l2_tunnel_conf *l2_tunnel,
8410          uint32_t mask,
8411          uint8_t en)
8412 {
8413         int ret = 0;
8414
8415         if (l2_tunnel == NULL)
8416                 return -EINVAL;
8417
8418         ret = -EINVAL;  /* returned if no known mask bit is set */
8419         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
8420                 if (en)
8421                         ret = ixgbe_dev_l2_tunnel_enable(
8422                                 dev,
8423                                 l2_tunnel->l2_tunnel_type);
8424                 else
8425                         ret = ixgbe_dev_l2_tunnel_disable(
8426                                 dev,
8427                                 l2_tunnel->l2_tunnel_type);
8428         }
8429
8430         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
8431                 if (en)
8432                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
8433                                 dev,
8434                                 l2_tunnel);
8435                 else
8436                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
8437                                 dev,
8438                                 l2_tunnel);
8439         }
8440
8441         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
8442                 if (en)
8443                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
8444                                 dev,
8445                                 l2_tunnel->l2_tunnel_type);
8446                 else
8447                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
8448                                 dev,
8449                                 l2_tunnel->l2_tunnel_type);
8450         }
8451
8452         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
8453                 if (en)
8454                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
8455                                 dev,
8456                                 l2_tunnel->l2_tunnel_type);
8457                 else
8458                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
8459                                 dev,
8460                                 l2_tunnel->l2_tunnel_type);
8461         }
8462
8463         return ret;
8464 }
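
/*
 * Illustrative sketch (not part of the driver, compiled out): the mask
 * bits handled above map to the generic
 * rte_eth_dev_l2_tunnel_offload_set() API; port_id is a placeholder.
 * Note that when several mask bits are set, the handler above returns
 * the status of the last operation processed.
 */
#if 0
#include <rte_ethdev.h>

static int
example_enable_e_tag_offloads(uint16_t port_id)
{
	struct rte_eth_l2_tunnel_conf conf = {
		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
	};
	uint32_t mask = ETH_L2_TUNNEL_ENABLE_MASK |
			ETH_L2_TUNNEL_STRIPPING_MASK;

	return rte_eth_dev_l2_tunnel_offload_set(port_id, &conf, mask, 1);
}
#endif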
8465
8466 static int
8467 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
8468                         uint16_t port)
8469 {
8470         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
8471         IXGBE_WRITE_FLUSH(hw);
8472
8473         return 0;
8474 }
8475
8476 /* There is only one register for the VxLAN UDP port, so several
8477  * ports cannot be added; the single register is simply updated.
8478  */
8479 static int
8480 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
8481                      uint16_t port)
8482 {
8483         if (port == 0) {
8484                 PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
8485                 return -EINVAL;
8486         }
8487
8488         return ixgbe_update_vxlan_port(hw, port);
8489 }
8490
8491 /* The VxLAN port cannot truly be deleted: the VxLAN UDP port
8492  * register must always hold a value, so reset it to its
8493  * original value, 0.
8494  */
8495 static int
8496 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
8497                      uint16_t port)
8498 {
8499         uint16_t cur_port;
8500
8501         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
8502
8503         if (cur_port != port) {
8504                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
8505                 return -EINVAL;
8506         }
8507
8508         return ixgbe_update_vxlan_port(hw, 0);
8509 }
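
/*
 * Illustrative sketch (not part of the driver, compiled out):
 * applications manage the single VxLAN port register through the
 * generic UDP tunnel API; the port_id below is a placeholder.
 */
#if 0
#include <rte_ethdev.h>

static int
example_set_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,	/* IANA-assigned VxLAN port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	if (ret < 0)
		return ret;

	/* Deleting restores the register to its original value, 0. */
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}
#endif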
8510
8511 /* Add UDP tunneling port */
8512 static int
8513 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8514                               struct rte_eth_udp_tunnel *udp_tunnel)
8515 {
8516         int ret = 0;
8517         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8518
8519         if (hw->mac.type != ixgbe_mac_X550 &&
8520             hw->mac.type != ixgbe_mac_X550EM_x &&
8521             hw->mac.type != ixgbe_mac_X550EM_a) {
8522                 return -ENOTSUP;
8523         }
8524
8525         if (udp_tunnel == NULL)
8526                 return -EINVAL;
8527
8528         switch (udp_tunnel->prot_type) {
8529         case RTE_TUNNEL_TYPE_VXLAN:
8530                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
8531                 break;
8532
8533         case RTE_TUNNEL_TYPE_GENEVE:
8534         case RTE_TUNNEL_TYPE_TEREDO:
8535                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8536                 ret = -EINVAL;
8537                 break;
8538
8539         default:
8540                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8541                 ret = -EINVAL;
8542                 break;
8543         }
8544
8545         return ret;
8546 }
8547
8548 /* Remove UDP tunneling port */
8549 static int
8550 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8551                               struct rte_eth_udp_tunnel *udp_tunnel)
8552 {
8553         int ret = 0;
8554         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8555
8556         if (hw->mac.type != ixgbe_mac_X550 &&
8557             hw->mac.type != ixgbe_mac_X550EM_x &&
8558             hw->mac.type != ixgbe_mac_X550EM_a) {
8559                 return -ENOTSUP;
8560         }
8561
8562         if (udp_tunnel == NULL)
8563                 return -EINVAL;
8564
8565         switch (udp_tunnel->prot_type) {
8566         case RTE_TUNNEL_TYPE_VXLAN:
8567                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
8568                 break;
8569         case RTE_TUNNEL_TYPE_GENEVE:
8570         case RTE_TUNNEL_TYPE_TEREDO:
8571                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8572                 ret = -EINVAL;
8573                 break;
8574         default:
8575                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8576                 ret = -EINVAL;
8577                 break;
8578         }
8579
8580         return ret;
8581 }
8582
8583 static int
8584 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
8585 {
8586         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8587         int ret;
8588
8589         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) {
8590         case IXGBE_SUCCESS:
8591                 ret = 0;
8592                 break;
8593         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8594                 ret = -ENOTSUP;
8595                 break;
8596         default:
8597                 ret = -EAGAIN;
8598                 break;
8599         }
8600
8601         return ret;
8602 }
8603
8604 static int
8605 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
8606 {
8607         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8608         int ret;
8609
8610         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) {
8611         case IXGBE_SUCCESS:
8612                 ret = 0;
8613                 break;
8614         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8615                 ret = -ENOTSUP;
8616                 break;
8617         default:
8618                 ret = -EAGAIN;
8619                 break;
8620         }
8621
8622         return ret;
8623 }
8624
8625 static int
8626 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8627 {
8628         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8629         int ret;
8630         int mode = IXGBEVF_XCAST_MODE_ALLMULTI;
8631
8632         switch (hw->mac.ops.update_xcast_mode(hw, mode)) {
8633         case IXGBE_SUCCESS:
8634                 ret = 0;
8635                 break;
8636         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8637                 ret = -ENOTSUP;
8638                 break;
8639         default:
8640                 ret = -EAGAIN;
8641                 break;
8642         }
8643
8644         return ret;
8645 }
8646
8647 static int
8648 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8649 {
8650         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8651         int ret;
8652
8653         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) {
8654         case IXGBE_SUCCESS:
8655                 ret = 0;
8656                 break;
8657         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8658                 ret = -ENOTSUP;
8659                 break;
8660         default:
8661                 ret = -EAGAIN;
8662                 break;
8663         }
8664
8665         return ret;
8666 }
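
/*
 * Illustrative sketch (not part of the driver, compiled out): the four
 * VF handlers above translate the PF mailbox status into errno-style
 * codes that surface through the generic ethdev calls; port_id is a
 * placeholder and the fallback policy is purely an example.
 */
#if 0
#include <errno.h>
#include <rte_ethdev.h>

static int
example_vf_enable_promisc(uint16_t port_id)
{
	int ret = rte_eth_promiscuous_enable(port_id);

	/* -ENOTSUP: the PF does not support the xcast mailbox request. */
	if (ret == -ENOTSUP)
		ret = rte_eth_allmulticast_enable(port_id);

	return ret;
}
#endif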
8667
8668 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8669 {
8670         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8671         u32 in_msg = 0;
8672
8673         /* peek the message first */
8674         in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
8675
8676         /* PF reset VF event */
8677         if (in_msg == IXGBE_PF_CONTROL_MSG) {
8678                 /* dummy mailbox read to ack the PF */
8679                 if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8680                         return;
8681                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8682                                               NULL);
8683         }
8684 }
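
/*
 * Illustrative sketch (not part of the driver, compiled out): a VF
 * application can observe the PF-reset notification raised above by
 * registering for RTE_ETH_EVENT_INTR_RESET. The callback below is a
 * placeholder; a real one would stop, reconfigure and restart the port.
 */
#if 0
#include <rte_common.h>
#include <rte_ethdev.h>

static int
example_reset_cb(uint16_t port_id, enum rte_eth_event_type event,
		 void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	return rte_eth_dev_reset(port_id);
}

static void
example_register_reset_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      example_reset_cb, NULL);
}
#endif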
8685
8686 static int
8687 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8688 {
8689         uint32_t eicr;
8690         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8691         struct ixgbe_interrupt *intr =
8692                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8693         ixgbevf_intr_disable(dev);
8694
8695         /* read-on-clear nic registers here */
8696         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8697         intr->flags = 0;
8698
8699         /* only one misc vector supported - mailbox */
8700         eicr &= IXGBE_VTEICR_MASK;
8701         if (eicr == IXGBE_MISC_VEC_ID)
8702                 intr->flags |= IXGBE_FLAG_MAILBOX;
8703
8704         return 0;
8705 }
8706
8707 static int
8708 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8709 {
8710         struct ixgbe_interrupt *intr =
8711                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8712
8713         if (intr->flags & IXGBE_FLAG_MAILBOX) {
8714                 ixgbevf_mbx_process(dev);
8715                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
8716         }
8717
8718         ixgbevf_intr_enable(dev);
8719
8720         return 0;
8721 }
8722
8723 static void
8724 ixgbevf_dev_interrupt_handler(void *param)
8725 {
8726         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8727
8728         ixgbevf_dev_interrupt_get_status(dev);
8729         ixgbevf_dev_interrupt_action(dev);
8730 }
8731
8732 /**
8733  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8734  *  @hw: pointer to hardware structure
8735  *
8736  *  Stops the transmit data path and waits for the HW to internally empty
8737  *  the Tx security block
8738  **/
8739 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8740 {
8741 #define IXGBE_MAX_SECTX_POLL 40
8742
8743         int i;
8744         int sectxreg;
8745
8746         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8747         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8748         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8749         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8750                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8751                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8752                         break;
8753                 /* Use interrupt-safe sleep just in case */
8754                 usec_delay(1000);
8755         }
8756
8757         /* For informational purposes only */
8758         if (i >= IXGBE_MAX_SECTX_POLL)
8759                 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
8760                             "path fully disabled. Continuing with init.");
8761
8762         return IXGBE_SUCCESS;
8763 }
8764
8765 /**
8766  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8767  *  @hw: pointer to hardware structure
8768  *
8769  *  Enables the transmit data path.
8770  **/
8771 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8772 {
8773         uint32_t sectxreg;
8774
8775         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8776         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8777         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8778         IXGBE_WRITE_FLUSH(hw);
8779
8780         return IXGBE_SUCCESS;
8781 }
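
/*
 * Illustrative sketch (not part of the driver, compiled out): the two
 * helpers above are meant to bracket security-block reconfiguration,
 * exactly as the MACsec register functions later in this file do.
 */
#if 0
static void
example_sec_tx_bracket(struct ixgbe_hw *hw)
{
	ixgbe_disable_sec_tx_path_generic(hw);
	/* ... reprogram SECTX/LSEC registers here ... */
	ixgbe_enable_sec_tx_path_generic(hw);
}
#endif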
8782
8783 /* restore n-tuple filter */
8784 static inline void
8785 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8786 {
8787         struct ixgbe_filter_info *filter_info =
8788                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8789         struct ixgbe_5tuple_filter *node;
8790
8791         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8792                 ixgbe_inject_5tuple_filter(dev, node);
8793         }
8794 }
8795
8796 /* restore ethernet type filter */
8797 static inline void
8798 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8799 {
8800         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8801         struct ixgbe_filter_info *filter_info =
8802                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8803         int i;
8804
8805         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8806                 if (filter_info->ethertype_mask & (1 << i)) {
8807                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8808                                         filter_info->ethertype_filters[i].etqf);
8809                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8810                                         filter_info->ethertype_filters[i].etqs);
8811                         IXGBE_WRITE_FLUSH(hw);
8812                 }
8813         }
8814 }
8815
8816 /* restore SYN filter */
8817 static inline void
8818 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8819 {
8820         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8821         struct ixgbe_filter_info *filter_info =
8822                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8823         uint32_t synqf;
8824
8825         synqf = filter_info->syn_info;
8826
8827         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8828                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8829                 IXGBE_WRITE_FLUSH(hw);
8830         }
8831 }
8832
8833 /* restore L2 tunnel filter */
8834 static inline void
8835 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8836 {
8837         struct ixgbe_l2_tn_info *l2_tn_info =
8838                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8839         struct ixgbe_l2_tn_filter *node;
8840         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8841
8842         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8843                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8844                 l2_tn_conf.tunnel_id      = node->key.tn_id;
8845                 l2_tn_conf.pool           = node->pool;
8846                 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8847         }
8848 }
8849
8850 /* restore rss filter */
8851 static inline void
8852 ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
8853 {
8854         struct ixgbe_filter_info *filter_info =
8855                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8856
8857         if (filter_info->rss_info.conf.queue_num)
8858                 ixgbe_config_rss_filter(dev,
8859                         &filter_info->rss_info, TRUE);
8860 }
8861
8862 static int
8863 ixgbe_filter_restore(struct rte_eth_dev *dev)
8864 {
8865         ixgbe_ntuple_filter_restore(dev);
8866         ixgbe_ethertype_filter_restore(dev);
8867         ixgbe_syn_filter_restore(dev);
8868         ixgbe_fdir_filter_restore(dev);
8869         ixgbe_l2_tn_filter_restore(dev);
8870         ixgbe_rss_filter_restore(dev);
8871
8872         return 0;
8873 }
8874
8875 static void
8876 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
8877 {
8878         struct ixgbe_l2_tn_info *l2_tn_info =
8879                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8880         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8881
8882         if (l2_tn_info->e_tag_en)
8883                 (void)ixgbe_e_tag_enable(hw);
8884
8885         if (l2_tn_info->e_tag_fwd_en)
8886                 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
8887
8888         (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
8889 }
8890
8891 /* remove all the n-tuple filters */
8892 void
8893 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
8894 {
8895         struct ixgbe_filter_info *filter_info =
8896                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8897         struct ixgbe_5tuple_filter *p_5tuple;
8898
8899         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
8900                 ixgbe_remove_5tuple_filter(dev, p_5tuple);
8901 }
8902
8903 /* remove all the ether type filters */
8904 void
8905 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
8906 {
8907         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8908         struct ixgbe_filter_info *filter_info =
8909                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8910         int i;
8911
8912         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8913                 if (filter_info->ethertype_mask & (1 << i) &&
8914                     !filter_info->ethertype_filters[i].conf) {
8915                         (void)ixgbe_ethertype_filter_remove(filter_info,
8916                                                             (uint8_t)i);
8917                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
8918                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
8919                         IXGBE_WRITE_FLUSH(hw);
8920                 }
8921         }
8922 }
8923
8924 /* remove the SYN filter */
8925 void
8926 ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
8927 {
8928         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8929         struct ixgbe_filter_info *filter_info =
8930                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8931
8932         if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
8933                 filter_info->syn_info = 0;
8934
8935                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
8936                 IXGBE_WRITE_FLUSH(hw);
8937         }
8938 }
8939
8940 /* remove all the L2 tunnel filters */
8941 int
8942 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
8943 {
8944         struct ixgbe_l2_tn_info *l2_tn_info =
8945                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8946         struct ixgbe_l2_tn_filter *l2_tn_filter;
8947         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8948         int ret = 0;
8949
8950         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8951                 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8952                 l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
8953                 l2_tn_conf.pool           = l2_tn_filter->pool;
8954                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8955                 if (ret < 0)
8956                         return ret;
8957         }
8958
8959         return 0;
8960 }
8961
8962 void
8963 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
8964                                 struct ixgbe_macsec_setting *macsec_setting)
8965 {
8966         struct ixgbe_macsec_setting *macsec =
8967                 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8968
8969         macsec->offload_en = macsec_setting->offload_en;
8970         macsec->encrypt_en = macsec_setting->encrypt_en;
8971         macsec->replayprotect_en = macsec_setting->replayprotect_en;
8972 }
8973
8974 void
8975 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
8976 {
8977         struct ixgbe_macsec_setting *macsec =
8978                 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8979
8980         macsec->offload_en = 0;
8981         macsec->encrypt_en = 0;
8982         macsec->replayprotect_en = 0;
8983 }
8984
8985 void
8986 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
8987                                 struct ixgbe_macsec_setting *macsec_setting)
8988 {
8989         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8990         uint32_t ctrl;
8991         uint8_t en = macsec_setting->encrypt_en;
8992         uint8_t rp = macsec_setting->replayprotect_en;
8993
8994         /**
8995          * Workaround:
8996          * No Tx equivalent of ixgbe_disable_sec_rx_path is
8997          * implemented in the base code, and the base code
8998          * must not be modified in DPDK, so call the
8999          * hand-written Tx variant directly for now.
9000          * The hardware support has been checked by
9001          * ixgbe_disable_sec_rx_path().
9002          */
9003         ixgbe_disable_sec_tx_path_generic(hw);
9004
9005         /* Enable Ethernet CRC (required by MACsec offload) */
9006         ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
9007         ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
9008         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
9009
9010         /* Enable the TX and RX crypto engines */
9011         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
9012         ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
9013         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
9014
9015         ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
9016         ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
9017         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
9018
9019         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
9020         ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
9021         ctrl |= 0x3;
9022         IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
9023
9024         /* Enable SA lookup */
9025         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
9026         ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
9027         ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
9028                      IXGBE_LSECTXCTRL_AUTH;
9029         ctrl |= IXGBE_LSECTXCTRL_AISCI;
9030         ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
9031         ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
9032         IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
9033
9034         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
9035         ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
9036         ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
9037         ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
9038         if (rp)
9039                 ctrl |= IXGBE_LSECRXCTRL_RP;
9040         else
9041                 ctrl &= ~IXGBE_LSECRXCTRL_RP;
9042         IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
9043
9044         /* Start the data paths */
9045         ixgbe_enable_sec_rx_path(hw);
9046         /**
9047          * Workaround:
9048          * No Tx equivalent of ixgbe_enable_sec_rx_path is
9049          * implemented in the base code, and the base code
9050          * must not be modified in DPDK, so call the
9051          * hand-written Tx variant directly for now.
9052          */
9053         ixgbe_enable_sec_tx_path_generic(hw);
9054 }
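
/*
 * Illustrative sketch (not part of the driver, compiled out):
 * applications normally trigger the register sequence above through
 * the ixgbe-specific API declared in rte_pmd_ixgbe.h; port_id and the
 * flag values below are placeholders.
 */
#if 0
#include <rte_pmd_ixgbe.h>

static int
example_macsec_on(uint16_t port_id)
{
	/* en = 1: encrypt frames; rp = 1: enable replay protection */
	return rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);
}
#endif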
9055
9056 void
9057 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
9058 {
9059         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9060         uint32_t ctrl;
9061
9062         /**
9063          * Workaround:
9064          * No Tx equivalent of ixgbe_disable_sec_rx_path is
9065          * implemented in the base code, and the base code
9066          * must not be modified in DPDK, so call the
9067          * hand-written Tx variant directly for now.
9068          * The hardware support has been checked by
9069          * ixgbe_disable_sec_rx_path().
9070          */
9071         ixgbe_disable_sec_tx_path_generic(hw);
9072
9073         /* Disable the TX and RX crypto engines */
9074         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
9075         ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
9076         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
9077
9078         ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
9079         ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
9080         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
9081
9082         /* Disable SA lookup */
9083         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
9084         ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
9085         ctrl |= IXGBE_LSECTXCTRL_DISABLE;
9086         IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
9087
9088         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
9089         ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
9090         ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
9091         IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
9092
9093         /* Start the data paths */
9094         ixgbe_enable_sec_rx_path(hw);
9095         /**
9096          * Workaround:
9097          * No Tx equivalent of ixgbe_enable_sec_rx_path is
9098          * implemented in the base code, and the base code
9099          * must not be modified in DPDK, so call the
9100          * hand-written Tx variant directly for now.
9101          */
9102         ixgbe_enable_sec_tx_path_generic(hw);
9103 }
9104
9105 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
9106 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
9107 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
9108 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
9109 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
9110 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
9111 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
9112                               IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
9113
9114 RTE_LOG_REGISTER(ixgbe_logtype_init, pmd.net.ixgbe.init, NOTICE);
9115 RTE_LOG_REGISTER(ixgbe_logtype_driver, pmd.net.ixgbe.driver, NOTICE);
9116
9117 #ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
9118 RTE_LOG_REGISTER(ixgbe_logtype_rx, pmd.net.ixgbe.rx, DEBUG);
9119 #endif
9120 #ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
9121 RTE_LOG_REGISTER(ixgbe_logtype_tx, pmd.net.ixgbe.tx, DEBUG);
9122 #endif
9123 #ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
9124 RTE_LOG_REGISTER(ixgbe_logtype_tx_free, pmd.net.ixgbe.tx_free, DEBUG);
9125 #endif