/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
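
/*
 * Illustrative arithmetic (not driver code): since the thresholds above are
 * expressed in units of 1024 bytes, IXGBE_FC_HI = 0x80 corresponds to a
 * 128 KB high-water mark and IXGBE_FC_LO = 0x40 to a 64 KB low-water mark.
 */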

/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX
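
/*
 * For reference (assuming DPDK's RTE_LEN2MASK semantics of "mask with the
 * low `len` bits set"): IXGBE_4_BIT_MASK evaluates to 0x0F, while
 * IXGBE_8_BIT_MASK is simply UINT8_MAX (0xFF).
 */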

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
#define IXGBE_ETAG_ETYPE                       0x00005084
#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
#define IXGBE_RAH_ADTYPE                       0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG                    0x00000004
#define IXGBE_VTEICR_MASK                      0x07

#define IXGBE_EXVET_VET_EXT_SHIFT              16
#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK           "pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
        IXGBEVF_DEVARG_PFLINK_FULLCHK,
        NULL
};

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
static int  ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
                                  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
        unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                                 size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                               enum rte_vlan_type vlan_type,
                               uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
                                                  int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
                               struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_dev_setup_link_alarm_handler(void *param);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mac_addr,
                        uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
                                struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
                                   int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                             uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
                rte_ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                           uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                               uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
                                             struct rte_ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     enum rte_filter_type filter_type,
                     enum rte_filter_op filter_op,
                     void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mc_addr_set,
                                      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                                   struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
                            struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
                                struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
                                   struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
                                struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp,
                                            uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                   const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_l2_tunnel_eth_type_conf
        (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
        (struct rte_eth_dev *dev,
         struct rte_eth_l2_tunnel_conf *l2_tunnel,
         uint32_t mask,
         uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
                                             enum rte_filter_op filter_op,
                                             void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                         struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Macro to update VF statistics from registers that are not "cleared on read"
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
        uint32_t latest = IXGBE_READ_REG(hw, reg);              \
        cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
        u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
        u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
        u64 latest = ((new_msb << 32) | new_lsb);                \
        cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
        last = latest;                                           \
}
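
/*
 * Worked example (illustrative only): the counter is 36 bits wide, so the
 * update adds 2^36 (0x1000000000) before subtracting and masks back to
 * 36 bits to absorb a wrap. With last = 0xFFFFFFFF0 and latest = 0x10,
 * (0x1000000000 + 0x10 - 0xFFFFFFFF0) & 0xFFFFFFFFF = 0x20, which is the
 * correct delta across the wrap-around.
 */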

#define IXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
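
/*
 * Illustrative example (not driver code): with 32-bit bitmap words
 * (sizeof(bitmap[0]) * NBBY == 32), queue 37 maps to idx = 37 / 32 = 1 and
 * bit = 37 % 32 = 5, i.e. bit 5 of bitmap[1] records that queue's VLAN
 * strip setting.
 */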

int ixgbe_logtype_init;
int ixgbe_logtype_driver;

#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
int ixgbe_logtype_rx;
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
int ixgbe_logtype_tx;
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
int ixgbe_logtype_tx_free;
#endif

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
        { .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
        { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = IXGBE_MAX_RING_DESC,
        .nb_min = IXGBE_MIN_RING_DESC,
        .nb_align = IXGBE_TXD_ALIGN,
        .nb_seg_max = IXGBE_TX_MAX_SEG,
        .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
        .dev_set_link_up      = ixgbe_dev_set_link_up,
        .dev_set_link_down    = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .dev_reset            = ixgbe_dev_reset,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
        .xstats_get           = ixgbe_dev_xstats_get,
        .xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .xstats_get_names     = ixgbe_dev_xstats_get_names,
        .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .fw_version_get       = ixgbe_fw_version_get,
        .dev_infos_get        = ixgbe_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
        .rx_queue_start       = ixgbe_dev_rx_queue_start,
        .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
        .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .mac_addr_set         = ixgbe_set_default_mac_addr,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
        .mirror_rule_set      = ixgbe_mirror_rule_set,
        .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .timesync_enable      = ixgbe_timesync_enable,
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
        .get_module_info      = ixgbe_get_module_info,
        .get_module_eeprom    = ixgbe_get_module_eeprom,
        .get_dcb_info         = ixgbe_dev_get_dcb_info,
        .timesync_adjust_time = ixgbe_timesync_adjust_time,
        .timesync_read_time   = ixgbe_timesync_read_time,
        .timesync_write_time  = ixgbe_timesync_write_time,
        .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
        .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
        .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
        .tm_ops_get           = ixgbe_tm_ops_get,
        .tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
};

/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation are implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbevf_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
        .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
        .dev_reset            = ixgbevf_dev_reset,
        .promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
        .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
        .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
};

/* store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
        {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
        {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
        {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
        {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
        {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
        {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
        {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
        {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
        {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
        {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
        {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
        {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
        {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
        {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
        {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                prc1023)},
        {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                prc1522)},
        {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
        {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
        {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
        {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
        {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
        {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
        {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
        {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
        {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
        {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
        {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
        {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
        {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
        {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
        {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
        {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
        {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
                ptc1023)},
        {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
                ptc1522)},
        {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
        {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
        {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
        {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

        {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_add)},
        {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
                fdirustat_remove)},
        {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fadd)},
        {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
                fdirfstat_fremove)},
        {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
                fdirmatch)},
        {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
                fdirmiss)},

        {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
        {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
        {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
                fclast)},
        {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
        {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
        {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
        {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
        {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
                fcoe_noddp)},
        {"rx_fcoe_no_direct_data_placement_ext_buff",
                offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

        {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxontxc)},
        {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
                lxonrxc)},
        {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxofftxc)},
        {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
                lxoffrxc)},
        {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
                           sizeof(rte_ixgbe_stats_strings[0]))

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
        {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                out_pkts_untagged)},
        {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_pkts_encrypted)},
        {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
                out_pkts_protected)},
        {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
                out_octets_encrypted)},
        {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
                out_octets_protected)},
        {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
                in_pkts_untagged)},
        {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
                in_pkts_badtag)},
        {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_nosci)},
        {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unknownsci)},
        {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
                in_octets_decrypted)},
        {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
                in_octets_validated)},
        {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unchecked)},
        {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
                in_pkts_delayed)},
        {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
                in_pkts_late)},
        {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
                in_pkts_ok)},
        {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_invalid)},
        {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notvalid)},
        {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_unusedsa)},
        {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
                in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
                           sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
        {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
        {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
                           sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
        {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
        {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
                pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
                           sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /  \
                sizeof(rte_ixgbevf_stats_strings[0]))

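/*
 * A minimal sketch (illustrative, not driver code) of how these name/offset
 * tables are typically consumed: the offset indexes a 64-bit counter inside
 * the hardware stats structure.
 *
 *     const struct rte_ixgbe_xstats_name_off *ent = &rte_ixgbe_stats_strings[i];
 *     uint64_t value = *(uint64_t *)(((char *)hw_stats) + ent->offset);
 */
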
/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
                return 1;
        default:
                return 0;
        }
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
        uint32_t ctrl_ext;
        int32_t status;

        status = ixgbe_reset_hw(hw);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
        IXGBE_WRITE_FLUSH(hw);

        if (status == IXGBE_ERR_SFP_NOT_PRESENT)
                status = IXGBE_SUCCESS;
        return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        if (hw->mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
        uint32_t i;

        for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
}

static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
        uint8_t n, offset;

        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
                (hw->mac.type != ixgbe_mac_X550EM_x) &&
                (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;

        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
                PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

        /* Now clear any previous stat_idx set */
        clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] &= ~clearing_mask;
        else
                stat_mappings->rqsmr[n] &= ~clearing_mask;

        q_map = (uint32_t)stat_idx;
        q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
        qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
        if (!is_rx)
                stat_mappings->tqsm[n] |= qsmr_mask;
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

        /* Now write the mapping in the appropriate register */
        if (is_rx) {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
        return 0;
}
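
/*
 * Worked example (illustrative only): each 32-bit RQSMR/TQSM register holds
 * four 8-bit queue-map fields. For queue_id = 5, n = 5 / 4 = 1 and
 * offset = 5 % 4 = 1, so stat_idx lands in bits 15:8 of RQSMR[1] (RX) or
 * TQSM[1] (TX).
 */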

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
        for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
                /* rx */
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

                /* tx */
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
        }
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

        dcb_config->num_tcs.pg_tcs = dcb_max_tc;
        dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
        for (i = 0; i < dcb_max_tc; i++) {
                tc = &dcb_config->tc_config[i];
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }

        /* Initialize default user to priority mapping, UPx->TC0 */
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
        for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
        dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
        dcb_config->pfc_mode_enable = false;
        dcb_config->vt_mode = true;
        dcb_config->round_robin_enable = false;
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;

        /* we only support 4 TCs for X540, X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
                hw->mac.type == ixgbe_mac_X550EM_x ||
                hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
}
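
/*
 * Illustrative arithmetic (not driver code): with dcb_max_tc = 8, the
 * expression 100 / dcb_max_tc + (i & 1) assigns 12% to even-numbered TCs
 * and 13% to odd-numbered ones, so the eight traffic classes sum to
 * 4 * 12 + 4 * 13 = 100 percent of bandwidth.
 */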

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
        uint16_t mask;

        /*
         * The PHY lock should not fail at this early stage. If it does, it
         * is due to an improper exit of the application.
         * So force the release of the faulty lock. Release of the common
         * lock is done automatically by the swfw_sync function.
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);

        /*
         * These locks are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1 s) to be almost sure that if
         * the lock cannot be taken it is due to an improper lock of the
         * semaphore.
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        struct ixgbe_bw_conf *bw_conf =
                IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

        ixgbe_dev_macsec_setting_reset(eth_dev);

        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

        /*
         * For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check that we don't need
         * a different RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ixgbe_tx_queue *txq;
                /* The TX function in the primary process is set by the last
                 * queue initialized; the TX queues may not have been
                 * initialized by the primary process yet.
                 */
                if (eth_dev->data->tx_queues) {
                        txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
                        ixgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
                                     "Using default TX function.");
                }

                ixgbe_set_rx_function(eth_dev);

                return 0;
        }
1131
1132         rte_eth_copy_pci_info(eth_dev, pci_dev);
1133
1134         /* Vendor and Device ID need to be set before init of shared code */
1135         hw->device_id = pci_dev->id.device_id;
1136         hw->vendor_id = pci_dev->id.vendor_id;
1137         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1138         hw->allow_unsupported_sfp = 1;
1139
1140         /* Initialize the shared code (base driver) */
1141 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1142         diag = ixgbe_bypass_init_shared_code(hw);
1143 #else
1144         diag = ixgbe_init_shared_code(hw);
1145 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1146
1147         if (diag != IXGBE_SUCCESS) {
1148                 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1149                 return -EIO;
1150         }
1151
1152         if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
1153                 PMD_INIT_LOG(ERR, "\nERROR: "
1154                         "Firmware recovery mode detected. Limiting functionality.\n"
1155                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1156                         "User Guide for details on firmware recovery mode.");
1157                 return -EIO;
1158         }
1159
1160         /* pick up the PCI bus settings for reporting later */
1161         ixgbe_get_bus_info(hw);
1162
1163         /* Unlock any pending hardware semaphore */
1164         ixgbe_swfw_lock_reset(hw);
1165
1166 #ifdef RTE_LIBRTE_SECURITY
1167         /* Initialize security_ctx only for the primary process */
1168         if (ixgbe_ipsec_ctx_create(eth_dev))
1169                 return -ENOMEM;
1170 #endif
1171
1172         /* Initialize DCB configuration */
1173         memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1174         ixgbe_dcb_init(hw, dcb_config);
1175         /* Set default Hardware Flow Control settings */
1176         hw->fc.requested_mode = ixgbe_fc_full;
1177         hw->fc.current_mode = ixgbe_fc_full;
1178         hw->fc.pause_time = IXGBE_FC_PAUSE;
1179         for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1180                 hw->fc.low_water[i] = IXGBE_FC_LO;
1181                 hw->fc.high_water[i] = IXGBE_FC_HI;
1182         }
1183         hw->fc.send_xon = 1;
1184
1185         /* Make sure we have a good EEPROM before we read from it */
1186         diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1187         if (diag != IXGBE_SUCCESS) {
1188                 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1189                 return -EIO;
1190         }
1191
1192 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1193         diag = ixgbe_bypass_init_hw(hw);
1194 #else
1195         diag = ixgbe_init_hw(hw);
1196         hw->mac.autotry_restart = false;
1197 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1198
1199         /*
1200          * Devices with copper PHYs will fail to initialise if ixgbe_init_hw()
1201          * is called too soon after the kernel driver unbinding/binding occurs.
1202          * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1203          * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1204          * also called. See ixgbe_identify_phy_82599(). The reason for the
1205          * failure is not known, and it only occurs when virtualisation
1206          * features are disabled in the BIOS. A delay of 100ms was found to be
1207          * enough by trial-and-error, and is doubled to be safe.
1208          */
1209         if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1210                 rte_delay_ms(200);
1211                 diag = ixgbe_init_hw(hw);
1212         }
1213
1214         if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1215                 diag = IXGBE_SUCCESS;
1216
1217         if (diag == IXGBE_ERR_EEPROM_VERSION) {
1218                 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1219                              "LOM.  Please be aware there may be issues associated "
1220                              "with your hardware.");
1221                 PMD_INIT_LOG(ERR, "If you are experiencing problems "
1222                              "please contact your Intel or hardware representative "
1223                              "who provided you with this hardware.");
1224         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1225                 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1226         if (diag) {
1227                 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1228                 return -EIO;
1229         }
1230
1231         /* Reset the hw statistics */
1232         ixgbe_dev_stats_reset(eth_dev);
1233
1234         /* disable interrupt */
1235         ixgbe_disable_intr(hw);
1236
1237         /* reset mappings for queue statistics hw counters */
1238         ixgbe_reset_qstat_mappings(hw);
1239
1240         /* Allocate memory for storing MAC addresses */
1241         eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
1242                                                hw->mac.num_rar_entries, 0);
1243         if (eth_dev->data->mac_addrs == NULL) {
1244                 PMD_INIT_LOG(ERR,
1245                              "Failed to allocate %u bytes needed to store "
1246                              "MAC addresses",
1247                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1248                 return -ENOMEM;
1249         }
1250         /* Copy the permanent MAC address */
1251         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1252                         &eth_dev->data->mac_addrs[0]);
1253
1254         /* Allocate memory for storing hash filter MAC addresses */
1255         eth_dev->data->hash_mac_addrs = rte_zmalloc(
1256                 "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
1257         if (eth_dev->data->hash_mac_addrs == NULL) {
1258                 PMD_INIT_LOG(ERR,
1259                              "Failed to allocate %d bytes needed to store MAC addresses",
1260                              RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1261                 return -ENOMEM;
1262         }
1263
1264         /* Tell rte_eth_dev_close() that it should also release the private
1265          * port resources.
1266          */
1267         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1268
1269         /* initialize the vfta */
1270         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1271
1272         /* initialize the hw strip bitmap */
1273         memset(hwstrip, 0, sizeof(*hwstrip));
1274
1275         /* initialize PF if max_vfs not zero */
1276         ixgbe_pf_host_init(eth_dev);
1277
1278         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1279         /* let hardware know driver is loaded */
1280         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1281         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1282         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1283         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1284         IXGBE_WRITE_FLUSH(hw);
1285
1286         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1287                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1288                              (int) hw->mac.type, (int) hw->phy.type,
1289                              (int) hw->phy.sfp_type);
1290         else
1291                 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1292                              (int) hw->mac.type, (int) hw->phy.type);
1293
1294         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1295                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1296                      pci_dev->id.device_id);
1297
1298         rte_intr_callback_register(intr_handle,
1299                                    ixgbe_dev_interrupt_handler, eth_dev);
1300
1301         /* enable uio/vfio intr/eventfd mapping */
1302         rte_intr_enable(intr_handle);
1303
1304         /* enable support intr */
1305         ixgbe_enable_intr(eth_dev);
1306
1307         ixgbe_dev_set_link_down(eth_dev);
1308
1309         /* initialize filter info */
1310         memset(filter_info, 0,
1311                sizeof(struct ixgbe_filter_info));
1312
1313         /* initialize 5tuple filter list */
1314         TAILQ_INIT(&filter_info->fivetuple_list);
1315
1316         /* initialize flow director filter list & hash */
1317         ixgbe_fdir_filter_init(eth_dev);
1318
1319         /* initialize l2 tunnel filter list & hash */
1320         ixgbe_l2_tn_filter_init(eth_dev);
1321
1322         /* initialize flow filter lists */
1323         ixgbe_filterlist_init();
1324
1325         /* initialize bandwidth configuration info */
1326         memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1327
1328         /* initialize Traffic Manager configuration */
1329         ixgbe_tm_conf_init(eth_dev);
1330
1331         return 0;
1332 }
1333
1334 static int
1335 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1336 {
1337         PMD_INIT_FUNC_TRACE();
1338
1339         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1340                 return 0;
1341
1342         ixgbe_dev_close(eth_dev);
1343
1344         return 0;
1345 }
1346
1347 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1348 {
1349         struct ixgbe_filter_info *filter_info =
1350                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1351         struct ixgbe_5tuple_filter *p_5tuple;
1352
1353         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1354                 TAILQ_REMOVE(&filter_info->fivetuple_list,
1355                              p_5tuple,
1356                              entries);
1357                 rte_free(p_5tuple);
1358         }
1359         memset(filter_info->fivetuple_mask, 0,
1360                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1361
1362         return 0;
1363 }
1364
1365 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1366 {
1367         struct ixgbe_hw_fdir_info *fdir_info =
1368                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1369         struct ixgbe_fdir_filter *fdir_filter;
1370
1371         if (fdir_info->hash_map)
1372                 rte_free(fdir_info->hash_map);
1373         if (fdir_info->hash_handle)
1374                 rte_hash_free(fdir_info->hash_handle);
1375
1376         while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1377                 TAILQ_REMOVE(&fdir_info->fdir_list,
1378                              fdir_filter,
1379                              entries);
1380                 rte_free(fdir_filter);
1381         }
1382
1383         return 0;
1384 }
1385
1386 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1387 {
1388         struct ixgbe_l2_tn_info *l2_tn_info =
1389                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1390         struct ixgbe_l2_tn_filter *l2_tn_filter;
1391
1392         if (l2_tn_info->hash_map)
1393                 rte_free(l2_tn_info->hash_map);
1394         if (l2_tn_info->hash_handle)
1395                 rte_hash_free(l2_tn_info->hash_handle);
1396
1397         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1398                 TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1399                              l2_tn_filter,
1400                              entries);
1401                 rte_free(l2_tn_filter);
1402         }
1403
1404         return 0;
1405 }
1406
1407 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1408 {
1409         struct ixgbe_hw_fdir_info *fdir_info =
1410                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1411         char fdir_hash_name[RTE_HASH_NAMESIZE];
1412         struct rte_hash_parameters fdir_hash_params = {
1413                 .name = fdir_hash_name,
1414                 .entries = IXGBE_MAX_FDIR_FILTER_NUM,
1415                 .key_len = sizeof(union ixgbe_atr_input),
1416                 .hash_func = rte_hash_crc,
1417                 .hash_func_init_val = 0,
1418                 .socket_id = rte_socket_id(),
1419         };
1420
1421         TAILQ_INIT(&fdir_info->fdir_list);
1422         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1423                  "fdir_%s", eth_dev->device->name);
1424         fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1425         if (!fdir_info->hash_handle) {
1426                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1427                 return -EINVAL;
1428         }
1429         fdir_info->hash_map = rte_zmalloc("ixgbe",
1430                                           sizeof(struct ixgbe_fdir_filter *) *
1431                                           IXGBE_MAX_FDIR_FILTER_NUM,
1432                                           0);
1433         if (!fdir_info->hash_map) {
1434                 PMD_INIT_LOG(ERR,
1435                              "Failed to allocate memory for fdir hash map!");
1436                 return -ENOMEM;
1437         }
1438         fdir_info->mask_added = FALSE;
1439
1440         return 0;
1441 }
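
/*
 * Sketch of how the structures built above are used later (simplified,
 * mirroring ixgbe_insert_fdir_filter()): the flow key is added with
 * rte_hash_add_key(), and the returned slot index keys into hash_map:
 *
 *     ret = rte_hash_add_key(fdir_info->hash_handle, &filter->ixgbe_fdir);
 *     if (ret >= 0)
 *             fdir_info->hash_map[ret] = filter;
 */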
1442
1443 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1444 {
1445         struct ixgbe_l2_tn_info *l2_tn_info =
1446                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1447         char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1448         struct rte_hash_parameters l2_tn_hash_params = {
1449                 .name = l2_tn_hash_name,
1450                 .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1451                 .key_len = sizeof(struct ixgbe_l2_tn_key),
1452                 .hash_func = rte_hash_crc,
1453                 .hash_func_init_val = 0,
1454                 .socket_id = rte_socket_id(),
1455         };
1456
1457         TAILQ_INIT(&l2_tn_info->l2_tn_list);
1458         snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1459                  "l2_tn_%s", eth_dev->device->name);
1460         l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1461         if (!l2_tn_info->hash_handle) {
1462                 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1463                 return -EINVAL;
1464         }
1465         l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1466                                    sizeof(struct ixgbe_l2_tn_filter *) *
1467                                    IXGBE_MAX_L2_TN_FILTER_NUM,
1468                                    0);
1469         if (!l2_tn_info->hash_map) {
1470                 PMD_INIT_LOG(ERR,
1471                         "Failed to allocate memory for L2 TN hash map!");
1472                 return -ENOMEM;
1473         }
1474         l2_tn_info->e_tag_en = FALSE;
1475         l2_tn_info->e_tag_fwd_en = FALSE;
1476         l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
1477
1478         return 0;
1479 }
1480 /*
1481  * Negotiate mailbox API version with the PF.
1482  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1483  * Then we try to negotiate starting with the most recent one.
1484  * If all negotiation attempts fail, then we will proceed with
1485  * the default one (ixgbe_mbox_api_10).
1486  */
1487 static void
1488 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1489 {
1490         int32_t i;
1491
1492         /* start with highest supported, proceed down */
1493         static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1494                 ixgbe_mbox_api_13,
1495                 ixgbe_mbox_api_12,
1496                 ixgbe_mbox_api_11,
1497                 ixgbe_mbox_api_10,
1498         };
1499
1500         for (i = 0;
1501                         i != RTE_DIM(sup_ver) &&
1502                         ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1503                         i++)
1504                 ;
1505 }
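
/*
 * For example, a PF that only speaks ixgbe_mbox_api_11 rejects the api_13
 * and api_12 attempts, so the loop settles on api_11; if every attempt
 * fails, the reset default (ixgbe_mbox_api_10) stays in effect.
 */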
1506
1507 static void
1508 generate_random_mac_addr(struct rte_ether_addr *mac_addr)
1509 {
1510         uint64_t random;
1511
1512         /* Set Organizationally Unique Identifier (OUI) prefix. */
1513         mac_addr->addr_bytes[0] = 0x00;
1514         mac_addr->addr_bytes[1] = 0x09;
1515         mac_addr->addr_bytes[2] = 0xC0;
1516         /* Force indication of locally assigned MAC address. */
1517         mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
1518         /* Generate the last 3 bytes of the MAC address with a random number. */
1519         random = rte_rand();
1520         memcpy(&mac_addr->addr_bytes[3], &random, 3);
1521 }
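
/*
 * Illustration: with the OUI prefix above and the locally-administered bit
 * set, the generated address has the form 02:09:c0:xx:xx:xx, where the
 * last three bytes come from rte_rand().
 */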
1522
1523 static int
1524 devarg_handle_int(__rte_unused const char *key, const char *value,
1525                   void *extra_args)
1526 {
1527         uint16_t *n = extra_args;
1528
1529         if (value == NULL || extra_args == NULL)
1530                 return -EINVAL;
1531
1532         *n = (uint16_t)strtoul(value, NULL, 0);
1533         if (*n == USHRT_MAX && errno == ERANGE)
1534                 return -1;
1535
1536         return 0;
1537 }
1538
1539 static void
1540 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter,
1541                       struct rte_devargs *devargs)
1542 {
1543         struct rte_kvargs *kvlist;
1544         uint16_t pflink_fullchk;
1545
1546         if (devargs == NULL)
1547                 return;
1548
1549         kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments);
1550         if (kvlist == NULL)
1551                 return;
1552
1553         if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 &&
1554             rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK,
1555                                devarg_handle_int, &pflink_fullchk) == 0 &&
1556             pflink_fullchk == 1)
1557                 adapter->pflink_fullchk = 1;
1558
1559         rte_kvargs_free(kvlist);
1560 }
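
/*
 * Usage sketch (hypothetical PCI address): the "pflink_fullchk" devarg
 * parsed above is passed on the EAL command line together with the VF's
 * PCI address, e.g.:
 *
 *     -w 0000:02:00.1,pflink_fullchk=1
 */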
1561
1562 /*
1563  * Virtual Function device init
1564  */
1565 static int
1566 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1567 {
1568         int diag;
1569         uint32_t tc, tcs;
1570         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1571         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1572         struct ixgbe_hw *hw =
1573                 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1574         struct ixgbe_vfta *shadow_vfta =
1575                 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1576         struct ixgbe_hwstrip *hwstrip =
1577                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1578         struct rte_ether_addr *perm_addr =
1579                 (struct rte_ether_addr *)hw->mac.perm_addr;
1580
1581         PMD_INIT_FUNC_TRACE();
1582
1583         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1584         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1585         eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1586
1587         /* for secondary processes, we don't initialise any further as primary
1588          * has already done this work. Only check we don't need a different
1589          * RX function.
1590          */
1591         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1592                 struct ixgbe_tx_queue *txq;
1593                 /* TX function in primary was set by the last queue initialized;
1594                  * TX queues may not have been initialized by the primary process
1595                  */
1596                 if (eth_dev->data->tx_queues) {
1597                         txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1598                         ixgbe_set_tx_function(eth_dev, txq);
1599                 } else {
1600                         /* Use default TX function if we get here */
1601                         PMD_INIT_LOG(NOTICE,
1602                                      "No TX queues configured yet. Using default TX function.");
1603                 }
1604
1605                 ixgbe_set_rx_function(eth_dev);
1606
1607                 return 0;
1608         }
1609
1610         ixgbevf_parse_devargs(eth_dev->data->dev_private,
1611                               pci_dev->device.devargs);
1612
1613         rte_eth_copy_pci_info(eth_dev, pci_dev);
1614
1615         hw->device_id = pci_dev->id.device_id;
1616         hw->vendor_id = pci_dev->id.vendor_id;
1617         hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1618
1619         /* initialize the vfta */
1620         memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1621
1622         /* initialize the hw strip bitmap */
1623         memset(hwstrip, 0, sizeof(*hwstrip));
1624
1625         /* Initialize the shared code (base driver) */
1626         diag = ixgbe_init_shared_code(hw);
1627         if (diag != IXGBE_SUCCESS) {
1628                 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1629                 return -EIO;
1630         }
1631
1632         /* init_mailbox_params */
1633         hw->mbx.ops.init_params(hw);
1634
1635         /* Reset the hw statistics */
1636         ixgbevf_dev_stats_reset(eth_dev);
1637
1638         /* Disable the interrupts for VF */
1639         ixgbevf_intr_disable(eth_dev);
1640
1641         hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1642         diag = hw->mac.ops.reset_hw(hw);
1643
1644         /*
1645          * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1646          * the underlying PF driver has not assigned a MAC address to the VF.
1647          * In this case, assign a random MAC address.
1648          */
1649         if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1650                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1651                 /*
1652                  * This error code will be propagated to the app by
1653                  * rte_eth_dev_reset, so use a public error code rather than
1654                  * the internal-only IXGBE_ERR_RESET_FAILED
1655                  */
1656                 return -EAGAIN;
1657         }
1658
1659         /* negotiate mailbox API version to use with the PF. */
1660         ixgbevf_negotiate_api(hw);
1661
1662         /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1663         ixgbevf_get_queues(hw, &tcs, &tc);
1664
1665         /* Allocate memory for storing MAC addresses */
1666         eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
1667                                                hw->mac.num_rar_entries, 0);
1668         if (eth_dev->data->mac_addrs == NULL) {
1669                 PMD_INIT_LOG(ERR,
1670                              "Failed to allocate %u bytes needed to store "
1671                              "MAC addresses",
1672                              RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1673                 return -ENOMEM;
1674         }
1675
1676         /* Tell rte_eth_dev_close() that it should also release the private
1677          * port resources.
1678          */
1679         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1680
1681         /* Generate a random MAC address, if none was assigned by PF. */
1682         if (rte_is_zero_ether_addr(perm_addr)) {
1683                 generate_random_mac_addr(perm_addr);
1684                 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1685                 if (diag) {
1686                         rte_free(eth_dev->data->mac_addrs);
1687                         eth_dev->data->mac_addrs = NULL;
1688                         return diag;
1689                 }
1690                 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1691                 PMD_INIT_LOG(INFO, "\tAssigning randomly generated MAC address "
1692                              "%02x:%02x:%02x:%02x:%02x:%02x",
1693                              perm_addr->addr_bytes[0],
1694                              perm_addr->addr_bytes[1],
1695                              perm_addr->addr_bytes[2],
1696                              perm_addr->addr_bytes[3],
1697                              perm_addr->addr_bytes[4],
1698                              perm_addr->addr_bytes[5]);
1699         }
1700
1701         /* Copy the permanent MAC address */
1702         rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1703
1704         /* reset the hardware with the new settings */
1705         diag = hw->mac.ops.start_hw(hw);
1706         switch (diag) {
1707         case  0:
1708                 break;
1709
1710         default:
1711                 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1712                 return -EIO;
1713         }
1714
1715         rte_intr_callback_register(intr_handle,
1716                                    ixgbevf_dev_interrupt_handler, eth_dev);
1717         rte_intr_enable(intr_handle);
1718         ixgbevf_intr_enable(eth_dev);
1719
1720         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1721                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1722                      pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1723
1724         return 0;
1725 }
1726
1727 /* Virtual Function device uninit */
1728
1729 static int
1730 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1731 {
1732         PMD_INIT_FUNC_TRACE();
1733
1734         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1735                 return 0;
1736
1737         ixgbevf_dev_close(eth_dev);
1738
1739         return 0;
1740 }
1741
1742 static int
1743 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1744                 struct rte_pci_device *pci_dev)
1745 {
1746         char name[RTE_ETH_NAME_MAX_LEN];
1747         struct rte_eth_dev *pf_ethdev;
1748         struct rte_eth_devargs eth_da;
1749         int i, retval;
1750
1751         if (pci_dev->device.devargs) {
1752                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
1753                                 &eth_da);
1754                 if (retval)
1755                         return retval;
1756         } else
1757                 memset(&eth_da, 0, sizeof(eth_da));
1758
1759         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
1760                 sizeof(struct ixgbe_adapter),
1761                 eth_dev_pci_specific_init, pci_dev,
1762                 eth_ixgbe_dev_init, NULL);
1763
1764         if (retval || eth_da.nb_representor_ports < 1)
1765                 return retval;
1766
1767         pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1768         if (pf_ethdev == NULL)
1769                 return -ENODEV;
1770
1771         /* probe VF representor ports */
1772         for (i = 0; i < eth_da.nb_representor_ports; i++) {
1773                 struct ixgbe_vf_info *vfinfo;
1774                 struct ixgbe_vf_representor representor;
1775
1776                 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
1777                         pf_ethdev->data->dev_private);
1778                 if (vfinfo == NULL) {
1779                         PMD_DRV_LOG(ERR,
1780                                 "no virtual functions supported by PF");
1781                         break;
1782                 }
1783
1784                 representor.vf_id = eth_da.representor_ports[i];
1785                 representor.switch_domain_id = vfinfo->switch_domain_id;
1786                 representor.pf_ethdev = pf_ethdev;
1787
1788                 /* representor port name: net_<pci bdf>_representor_<vf id> */
1789                 snprintf(name, sizeof(name), "net_%s_representor_%d",
1790                         pci_dev->device.name,
1791                         eth_da.representor_ports[i]);
1792
1793                 retval = rte_eth_dev_create(&pci_dev->device, name,
1794                         sizeof(struct ixgbe_vf_representor), NULL, NULL,
1795                         ixgbe_vf_representor_init, &representor);
1796
1797                 if (retval)
1798                         PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
1799                                 "representor %s.", name);
1800         }
1801
1802         return 0;
1803 }
1804
1805 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1806 {
1807         struct rte_eth_dev *ethdev;
1808
1809         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1810         if (!ethdev)
1811                 return 0;
1812
1813         if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1814                 return rte_eth_dev_pci_generic_remove(pci_dev,
1815                                         ixgbe_vf_representor_uninit);
1816         else
1817                 return rte_eth_dev_pci_generic_remove(pci_dev,
1818                                                 eth_ixgbe_dev_uninit);
1819 }
1820
1821 static struct rte_pci_driver rte_ixgbe_pmd = {
1822         .id_table = pci_id_ixgbe_map,
1823         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1824         .probe = eth_ixgbe_pci_probe,
1825         .remove = eth_ixgbe_pci_remove,
1826 };
1827
1828 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1829         struct rte_pci_device *pci_dev)
1830 {
1831         return rte_eth_dev_pci_generic_probe(pci_dev,
1832                 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1833 }
1834
1835 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1836 {
1837         return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1838 }
1839
1840 /*
1841  * virtual function driver struct
1842  */
1843 static struct rte_pci_driver rte_ixgbevf_pmd = {
1844         .id_table = pci_id_ixgbevf_map,
1845         .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1846         .probe = eth_ixgbevf_pci_probe,
1847         .remove = eth_ixgbevf_pci_remove,
1848 };
1849
1850 static int
1851 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1852 {
1853         struct ixgbe_hw *hw =
1854                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1855         struct ixgbe_vfta *shadow_vfta =
1856                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1857         uint32_t vfta;
1858         uint32_t vid_idx;
1859         uint32_t vid_bit;
1860
1861         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1862         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1863         vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1864         if (on)
1865                 vfta |= vid_bit;
1866         else
1867                 vfta &= ~vid_bit;
1868         IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1869
1870         /* update local VFTA copy */
1871         shadow_vfta->vfta[vid_idx] = vfta;
1872
1873         return 0;
1874 }
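
/*
 * Worked example: vlan_id = 100 gives vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of VFTA[3] controls
 * filtering for VLAN 100.
 */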
1875
1876 static void
1877 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1878 {
1879         if (on)
1880                 ixgbe_vlan_hw_strip_enable(dev, queue);
1881         else
1882                 ixgbe_vlan_hw_strip_disable(dev, queue);
1883 }
1884
1885 static int
1886 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1887                     enum rte_vlan_type vlan_type,
1888                     uint16_t tpid)
1889 {
1890         struct ixgbe_hw *hw =
1891                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1892         int ret = 0;
1893         uint32_t reg;
1894         uint32_t qinq;
1895
1896         qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1897         qinq &= IXGBE_DMATXCTL_GDV;
1898
1899         switch (vlan_type) {
1900         case ETH_VLAN_TYPE_INNER:
1901                 if (qinq) {
1902                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1903                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1904                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1905                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1906                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1907                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1908                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1909                 } else {
1910                         ret = -ENOTSUP;
1911                         PMD_DRV_LOG(ERR, "Inner type is not supported"
1912                                     " by single VLAN");
1913                 }
1914                 break;
1915         case ETH_VLAN_TYPE_OUTER:
1916                 if (qinq) {
1917                         /* Only the high 16 bits are valid */
1918                         IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1919                                         IXGBE_EXVET_VET_EXT_SHIFT);
1920                 } else {
1921                         reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1922                         reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1923                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1924                         reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1925                         reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1926                                 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1927                         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1928                 }
1929
1930                 break;
1931         default:
1932                 ret = -EINVAL;
1933                 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1934                 break;
1935         }
1936
1937         return ret;
1938 }
1939
1940 void
1941 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1942 {
1943         struct ixgbe_hw *hw =
1944                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1945         uint32_t vlnctrl;
1946
1947         PMD_INIT_FUNC_TRACE();
1948
1949         /* Filter Table Disable */
1950         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1951         vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1952
1953         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1954 }
1955
1956 void
1957 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1958 {
1959         struct ixgbe_hw *hw =
1960                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1961         struct ixgbe_vfta *shadow_vfta =
1962                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1963         uint32_t vlnctrl;
1964         uint16_t i;
1965
1966         PMD_INIT_FUNC_TRACE();
1967
1968         /* Filter Table Enable */
1969         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1970         vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1971         vlnctrl |= IXGBE_VLNCTRL_VFE;
1972
1973         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1974
1975         /* write whatever is in local vfta copy */
1976         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1977                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1978 }
1979
1980 static void
1981 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1982 {
1983         struct ixgbe_hwstrip *hwstrip =
1984                 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1985         struct ixgbe_rx_queue *rxq;
1986
1987         if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1988                 return;
1989
1990         if (on)
1991                 IXGBE_SET_HWSTRIP(hwstrip, queue);
1992         else
1993                 IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1994
1995         if (queue >= dev->data->nb_rx_queues)
1996                 return;
1997
1998         rxq = dev->data->rx_queues[queue];
1999
2000         if (on) {
2001                 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2002                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2003         } else {
2004                 rxq->vlan_flags = PKT_RX_VLAN;
2005                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2006         }
2007 }
2008
2009 static void
2010 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
2011 {
2012         struct ixgbe_hw *hw =
2013                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2014         uint32_t ctrl;
2015
2016         PMD_INIT_FUNC_TRACE();
2017
2018         if (hw->mac.type == ixgbe_mac_82598EB) {
2019                 /* No queue level support */
2020                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2021                 return;
2022         }
2023
2024         /* On other 10G NICs, VLAN stripping can be set up per queue via RXDCTL */
2025         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2026         ctrl &= ~IXGBE_RXDCTL_VME;
2027         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2028 
2029         /* record this setting for HW strip per queue */
2030         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
2031 }
2032
2033 static void
2034 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
2035 {
2036         struct ixgbe_hw *hw =
2037                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2038         uint32_t ctrl;
2039
2040         PMD_INIT_FUNC_TRACE();
2041
2042         if (hw->mac.type == ixgbe_mac_82598EB) {
2043                 /* No queue level support */
2044                 PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2045                 return;
2046         }
2047
2048         /* On other 10G NICs, VLAN stripping can be set up per queue via RXDCTL */
2049         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2050         ctrl |= IXGBE_RXDCTL_VME;
2051         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2052 
2053         /* record this setting for HW strip per queue */
2054         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2055 }
2056
2057 static void
2058 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2059 {
2060         struct ixgbe_hw *hw =
2061                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2062         uint32_t ctrl;
2063
2064         PMD_INIT_FUNC_TRACE();
2065
2066         /* DMATXCTL: Generic Double VLAN Disable */
2067         ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2068         ctrl &= ~IXGBE_DMATXCTL_GDV;
2069         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2070
2071         /* CTRL_EXT: Global Double VLAN Disable */
2072         ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2073         ctrl &= ~IXGBE_EXTENDED_VLAN;
2074         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2075
2076 }
2077
2078 static void
2079 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2080 {
2081         struct ixgbe_hw *hw =
2082                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2083         uint32_t ctrl;
2084
2085         PMD_INIT_FUNC_TRACE();
2086
2087         /* DMATXCTL: Generic Double VLAN Enable */
2088         ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2089         ctrl |= IXGBE_DMATXCTL_GDV;
2090         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2091
2092         /* CTRL_EXT: Global Double VLAN Enable */
2093         ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2094         ctrl |= IXGBE_EXTENDED_VLAN;
2095         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2096
2097         /* Clear pooling mode of PFVTCTL. It's required by X550. */
2098         if (hw->mac.type == ixgbe_mac_X550 ||
2099             hw->mac.type == ixgbe_mac_X550EM_x ||
2100             hw->mac.type == ixgbe_mac_X550EM_a) {
2101                 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2102                 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2103                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2104         }
2105
2106         /*
2107          * The VET EXT field in the EXVET register is 0x8100 by default, so no
2108          * change is needed; the same applies to the VT field of DMATXCTL.
2109          */
2110 }
2111
2112 void
2113 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
2114 {
2115         struct ixgbe_hw *hw =
2116                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2117         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2118         uint32_t ctrl;
2119         uint16_t i;
2120         struct ixgbe_rx_queue *rxq;
2121         bool on;
2122
2123         PMD_INIT_FUNC_TRACE();
2124
2125         if (hw->mac.type == ixgbe_mac_82598EB) {
2126                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2127                         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2128                         ctrl |= IXGBE_VLNCTRL_VME;
2129                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2130                 } else {
2131                         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2132                         ctrl &= ~IXGBE_VLNCTRL_VME;
2133                         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2134                 }
2135         } else {
2136                 /*
2137                  * On other 10G NICs, VLAN stripping can be set up
2138                  * per queue via RXDCTL
2139                  */
2140                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2141                         rxq = dev->data->rx_queues[i];
2142                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2143                         if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2144                                 ctrl |= IXGBE_RXDCTL_VME;
2145                                 on = TRUE;
2146                         } else {
2147                                 ctrl &= ~IXGBE_RXDCTL_VME;
2148                                 on = FALSE;
2149                         }
2150                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2151
2152                         /* record this setting for HW strip per queue */
2153                         ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
2154                 }
2155         }
2156 }
2157
2158 static void
2159 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
2160 {
2161         uint16_t i;
2162         struct rte_eth_rxmode *rxmode;
2163         struct ixgbe_rx_queue *rxq;
2164
2165         if (mask & ETH_VLAN_STRIP_MASK) {
2166                 rxmode = &dev->data->dev_conf.rxmode;
2167                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2168                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2169                                 rxq = dev->data->rx_queues[i];
2170                                 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2171                         }
2172                 else
2173                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2174                                 rxq = dev->data->rx_queues[i];
2175                                 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2176                         }
2177         }
2178 }
2179
2180 static int
2181 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2182 {
2183         struct rte_eth_rxmode *rxmode;
2184         rxmode = &dev->data->dev_conf.rxmode;
2185
2186         if (mask & ETH_VLAN_STRIP_MASK) {
2187                 ixgbe_vlan_hw_strip_config(dev);
2188         }
2189
2190         if (mask & ETH_VLAN_FILTER_MASK) {
2191                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2192                         ixgbe_vlan_hw_filter_enable(dev);
2193                 else
2194                         ixgbe_vlan_hw_filter_disable(dev);
2195         }
2196
2197         if (mask & ETH_VLAN_EXTEND_MASK) {
2198                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2199                         ixgbe_vlan_hw_extend_enable(dev);
2200                 else
2201                         ixgbe_vlan_hw_extend_disable(dev);
2202         }
2203
2204         return 0;
2205 }
2206
2207 static int
2208 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2209 {
2210         ixgbe_config_vlan_strip_on_all_queues(dev, mask);
2211
2212         ixgbe_vlan_offload_config(dev, mask);
2213
2214         return 0;
2215 }
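
/*
 * Application-level sketch (not part of this driver): the handler above is
 * reached through the generic ethdev API, e.g. to turn on VLAN stripping:
 *
 *     int mask = rte_eth_dev_get_vlan_offload(port_id);
 *     rte_eth_dev_set_vlan_offload(port_id, mask | ETH_VLAN_STRIP_OFFLOAD);
 */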
2216
2217 static void
2218 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2219 {
2220         struct ixgbe_hw *hw =
2221                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2222         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2223         uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2224
2225         vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2226         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2227 }
2228
2229 static int
2230 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2231 {
2232         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2233
2234         switch (nb_rx_q) {
2235         case 1:
2236         case 2:
2237                 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2238                 break;
2239         case 4:
2240                 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2241                 break;
2242         default:
2243                 return -EINVAL;
2244         }
2245
2246         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
2247                 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2248         RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
2249                 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2250         return 0;
2251 }
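
/*
 * Worked example: nb_rx_q = 4 selects ETH_32_POOLS, so nb_q_per_pool =
 * 128 / 32 = 4; with, say, max_vfs = 16 the PF's default pool starts at
 * queue index 16 * 4 = 64.
 */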
2252
2253 static int
2254 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2255 {
2256         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2257         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2258         uint16_t nb_rx_q = dev->data->nb_rx_queues;
2259         uint16_t nb_tx_q = dev->data->nb_tx_queues;
2260
2261         if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2262                 /* check multi-queue mode */
2263                 switch (dev_conf->rxmode.mq_mode) {
2264                 case ETH_MQ_RX_VMDQ_DCB:
2265                         PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2266                         break;
2267                 case ETH_MQ_RX_VMDQ_DCB_RSS:
2268                         /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2269                         PMD_INIT_LOG(ERR, "SRIOV active,"
2270                                         " unsupported mq_mode rx %d.",
2271                                         dev_conf->rxmode.mq_mode);
2272                         return -EINVAL;
2273                 case ETH_MQ_RX_RSS:
2274                 case ETH_MQ_RX_VMDQ_RSS:
2275                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2276                         if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2277                                 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2278                                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2279                                                 " invalid queue number"
2280                                                 " for VMDQ RSS; allowed"
2281                                                 " values are 1, 2 or 4.");
2282                                         return -EINVAL;
2283                                 }
2284                         break;
2285                 case ETH_MQ_RX_VMDQ_ONLY:
2286                 case ETH_MQ_RX_NONE:
2287                         /* if no mq mode is configured, use the default scheme */
2288                         dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2289                         break;
2290                 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
2291                         /* SRIOV only works with VMDq enabled */
2292                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2293                                         " wrong mq_mode rx %d.",
2294                                         dev_conf->rxmode.mq_mode);
2295                         return -EINVAL;
2296                 }
2297
2298                 switch (dev_conf->txmode.mq_mode) {
2299                 case ETH_MQ_TX_VMDQ_DCB:
2300                         PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2301                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2302                         break;
2303                 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2304                         dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2305                         break;
2306                 }
2307
2308                 /* check valid queue number */
2309                 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2310                     (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2311                         PMD_INIT_LOG(ERR, "SRIOV is active,"
2312                                         " nb_rx_q=%d nb_tx_q=%d queue number"
2313                                         " must be less than or equal to %d.",
2314                                         nb_rx_q, nb_tx_q,
2315                                         RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2316                         return -EINVAL;
2317                 }
2318         } else {
2319                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2320                         PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2321                                           " not supported.");
2322                         return -EINVAL;
2323                 }
2324                 /* check configuration for vmdq+dcb mode */
2325                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2326                         const struct rte_eth_vmdq_dcb_conf *conf;
2327
2328                         if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2329                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2330                                                 IXGBE_VMDQ_DCB_NB_QUEUES);
2331                                 return -EINVAL;
2332                         }
2333                         conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2334                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2335                                conf->nb_queue_pools == ETH_32_POOLS)) {
2336                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2337                                                 " nb_queue_pools must be %d or %d.",
2338                                                 ETH_16_POOLS, ETH_32_POOLS);
2339                                 return -EINVAL;
2340                         }
2341                 }
2342                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2343                         const struct rte_eth_vmdq_dcb_tx_conf *conf;
2344
2345                         if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2346                                 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2347                                                  IXGBE_VMDQ_DCB_NB_QUEUES);
2348                                 return -EINVAL;
2349                         }
2350                         conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2351                         if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2352                                conf->nb_queue_pools == ETH_32_POOLS)) {
2353                                 PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2354                                                 " nb_queue_pools != %d and"
2355                                                 " nb_queue_pools != %d.",
2356                                                 ETH_16_POOLS, ETH_32_POOLS);
2357                                 return -EINVAL;
2358                         }
2359                 }
2360
2361                 /* For DCB mode check our configuration before we go further */
2362                 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2363                         const struct rte_eth_dcb_rx_conf *conf;
2364
2365                         conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2366                         if (!(conf->nb_tcs == ETH_4_TCS ||
2367                                conf->nb_tcs == ETH_8_TCS)) {
2368                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2369                                                 " and nb_tcs != %d.",
2370                                                 ETH_4_TCS, ETH_8_TCS);
2371                                 return -EINVAL;
2372                         }
2373                 }
2374
2375                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2376                         const struct rte_eth_dcb_tx_conf *conf;
2377
2378                         conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2379                         if (!(conf->nb_tcs == ETH_4_TCS ||
2380                                conf->nb_tcs == ETH_8_TCS)) {
2381                                 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2382                                                 " and nb_tcs != %d.",
2383                                                 ETH_4_TCS, ETH_8_TCS);
2384                                 return -EINVAL;
2385                         }
2386                 }
2387
2388                 /*
2389                  * When DCB/VT is off, the maximum number of queues changes,
2390                  * except on 82598EB, where it remains constant.
2391                  */
2392                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2393                                 hw->mac.type != ixgbe_mac_82598EB) {
2394                         if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2395                                 PMD_INIT_LOG(ERR,
2396                                              "Neither VT nor DCB are enabled, "
2397                                              "nb_tx_q > %d.",
2398                                              IXGBE_NONE_MODE_TX_NB_QUEUES);
2399                                 return -EINVAL;
2400                         }
2401                 }
2402         }
2403         return 0;
2404 }
2405
2406 static int
2407 ixgbe_dev_configure(struct rte_eth_dev *dev)
2408 {
2409         struct ixgbe_interrupt *intr =
2410                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2411         struct ixgbe_adapter *adapter = dev->data->dev_private;
2412         int ret;
2413
2414         PMD_INIT_FUNC_TRACE();
2415
2416         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
2417                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
2418
2419         /* multi-queue mode checking */
2420         ret  = ixgbe_check_mq_mode(dev);
2421         if (ret != 0) {
2422                 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2423                             ret);
2424                 return ret;
2425         }
2426
2427         /* set flag to update link status after init */
2428         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2429
2430         /*
2431          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
2432          * allocation or vector Rx preconditions, these flags will be reset.
2433          */
2434         adapter->rx_bulk_alloc_allowed = true;
2435         adapter->rx_vec_allowed = true;
2436
2437         return 0;
2438 }
2439
2440 static void
2441 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2442 {
2443         struct ixgbe_hw *hw =
2444                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2445         struct ixgbe_interrupt *intr =
2446                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2447         uint32_t gpie;
2448
2449         /* only set it up on X550EM_X */
2450         if (hw->mac.type == ixgbe_mac_X550EM_x) {
2451                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2452                 gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2453                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2454                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2455                         intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2456         }
2457 }
2458
2459 int
2460 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2461                         uint16_t tx_rate, uint64_t q_msk)
2462 {
2463         struct ixgbe_hw *hw;
2464         struct ixgbe_vf_info *vfinfo;
2465         struct rte_eth_link link;
2466         uint8_t  nb_q_per_pool;
2467         uint32_t queue_stride;
2468         uint32_t queue_idx, idx = 0, vf_idx;
2469         uint32_t queue_end;
2470         uint16_t total_rate = 0;
2471         struct rte_pci_device *pci_dev;
2472         int ret;
2473
2474         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2475         ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
2476         if (ret < 0)
2477                 return ret;
2478
2479         if (vf >= pci_dev->max_vfs)
2480                 return -EINVAL;
2481
2482         if (tx_rate > link.link_speed)
2483                 return -EINVAL;
2484
2485         if (q_msk == 0)
2486                 return 0;
2487
2488         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2489         vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2490         nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2491         queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2492         queue_idx = vf * queue_stride;
2493         queue_end = queue_idx + nb_q_per_pool - 1;
2494         if (queue_end >= hw->mac.max_tx_queues)
2495                 return -EINVAL;
2496
2497         if (vfinfo) {
2498                 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2499                         if (vf_idx == vf)
2500                                 continue;
2501                         for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2502                                 idx++)
2503                                 total_rate += vfinfo[vf_idx].tx_rate[idx];
2504                 }
2505         } else {
2506                 return -EINVAL;
2507         }
2508
2509         /* Store tx_rate for this vf. */
2510         for (idx = 0; idx < nb_q_per_pool; idx++) {
2511                 if (((uint64_t)0x1 << idx) & q_msk) {
2512                         if (vfinfo[vf].tx_rate[idx] != tx_rate)
2513                                 vfinfo[vf].tx_rate[idx] = tx_rate;
2514                         total_rate += tx_rate;
2515                 }
2516         }
2517
2518         if (total_rate > dev->data->dev_link.link_speed) {
2519         /* Reset the stored TX rate of the VF if it would cause the
2520          * link speed to be exceeded.
2521                  */
2522                 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2523                 return -EINVAL;
2524         }
2525
2526         /* Set RTTBCNRC of each queue/pool for VF X */
2527         for (; queue_idx <= queue_end; queue_idx++) {
2528                 if (0x1 & q_msk)
2529                         ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2530                 q_msk = q_msk >> 1;
2531         }
2532
2533         return 0;
2534 }
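/*
 * Example: a sketch of capping VF 1 at 1000 Mbps on its first two queues.
 * q_msk selects queues within the VF's pool (bit 0 = first queue), and
 * tx_rate is compared against the link speed above, i.e. it is in Mbps.
 * The VF index, rate and mask are hypothetical.
 *
 *     ret = ixgbe_set_vf_rate_limit(dev, 1, 1000, 0x3);
 */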
2535
2536 /*
2537  * Configure device link speed and setup link.
2538  * It returns 0 on success.
2539  */
2540 static int
2541 ixgbe_dev_start(struct rte_eth_dev *dev)
2542 {
2543         struct ixgbe_hw *hw =
2544                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2545         struct ixgbe_vf_info *vfinfo =
2546                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2547         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2548         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2549         uint32_t intr_vector = 0;
2550         int err;
2551         bool link_up = false, negotiate = 0;
2552         uint32_t speed = 0;
2553         uint32_t allowed_speeds = 0;
2554         int mask = 0;
2555         int status;
2556         uint16_t vf, idx;
2557         uint32_t *link_speeds;
2558         struct ixgbe_tm_conf *tm_conf =
2559                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2560         struct ixgbe_macsec_setting *macsec_setting =
2561                 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
2562
2563         PMD_INIT_FUNC_TRACE();
2564
2565         /* Stop the link setup handler before resetting the HW. */
2566         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
2567
2568         /* disable uio/vfio intr/eventfd mapping */
2569         rte_intr_disable(intr_handle);
2570
2571         /* stop adapter */
2572         hw->adapter_stopped = 0;
2573         ixgbe_stop_adapter(hw);
2574
2575         /* reinitialize adapter
2576          * this calls reset and start
2577          */
2578         status = ixgbe_pf_reset_hw(hw);
2579         if (status != 0)
2580                 return -1;
2581         hw->mac.ops.start_hw(hw);
2582         hw->mac.get_link_status = true;
2583
2584         /* configure PF module if SRIOV enabled */
2585         ixgbe_pf_host_configure(dev);
2586
2587         ixgbe_dev_phy_intr_setup(dev);
2588
2589         /* check and configure queue intr-vector mapping */
2590         if ((rte_intr_cap_multiple(intr_handle) ||
2591              !RTE_ETH_DEV_SRIOV(dev).active) &&
2592             dev->data->dev_conf.intr_conf.rxq != 0) {
2593                 intr_vector = dev->data->nb_rx_queues;
2594                 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2595                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2596                                         IXGBE_MAX_INTR_QUEUE_NUM);
2597                         return -ENOTSUP;
2598                 }
2599                 if (rte_intr_efd_enable(intr_handle, intr_vector))
2600                         return -1;
2601         }
2602
2603         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2604                 intr_handle->intr_vec =
2605                         rte_zmalloc("intr_vec",
2606                                     dev->data->nb_rx_queues * sizeof(int), 0);
2607                 if (intr_handle->intr_vec == NULL) {
2608                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2609                                      " intr_vec", dev->data->nb_rx_queues);
2610                         return -ENOMEM;
2611                 }
2612         }
2613
2614         /* configure MSI-X to sleep until rx interrupt */
2615         ixgbe_configure_msix(dev);
2616
2617         /* initialize transmission unit */
2618         ixgbe_dev_tx_init(dev);
2619
2620         /* This can fail when allocating mbufs for descriptor rings */
2621         err = ixgbe_dev_rx_init(dev);
2622         if (err) {
2623                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2624                 goto error;
2625         }
2626
2627         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2628                 ETH_VLAN_EXTEND_MASK;
2629         err = ixgbe_vlan_offload_config(dev, mask);
2630         if (err) {
2631                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2632                 goto error;
2633         }
2634
2635         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2636                 /* Enable vlan filtering for VMDq */
2637                 ixgbe_vmdq_vlan_hw_filter_enable(dev);
2638         }
2639
2640         /* Configure DCB hw */
2641         ixgbe_configure_dcb(dev);
2642
2643         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2644                 err = ixgbe_fdir_configure(dev);
2645                 if (err)
2646                         goto error;
2647         }
2648
2649         /* Restore vf rate limit */
2650         if (vfinfo != NULL) {
2651                 for (vf = 0; vf < pci_dev->max_vfs; vf++)
2652                         for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2653                                 if (vfinfo[vf].tx_rate[idx] != 0)
2654                                         ixgbe_set_vf_rate_limit(
2655                                                 dev, vf,
2656                                                 vfinfo[vf].tx_rate[idx],
2657                                                 1 << idx);
2658         }
2659
2660         ixgbe_restore_statistics_mapping(dev);
2661
2662         err = ixgbe_dev_rxtx_start(dev);
2663         if (err < 0) {
2664                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2665                 goto error;
2666         }
2667
2668         /* Skip link setup if loopback mode is enabled. */
2669         if (dev->data->dev_conf.lpbk_mode != 0) {
2670                 err = ixgbe_check_supported_loopback_mode(dev);
2671                 if (err < 0) {
2672                         PMD_INIT_LOG(ERR, "Unsupported loopback mode");
2673                         goto error;
2674                 } else {
2675                         goto skip_link_setup;
2676                 }
2677         }
2678
2679         if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2680                 err = hw->mac.ops.setup_sfp(hw);
2681                 if (err)
2682                         goto error;
2683         }
2684
2685         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2686                 /* Turn on the copper */
2687                 ixgbe_set_phy_power(hw, true);
2688         } else {
2689                 /* Turn on the laser */
2690                 ixgbe_enable_tx_laser(hw);
2691         }
2692
2693         err = ixgbe_check_link(hw, &speed, &link_up, 0);
2694         if (err)
2695                 goto error;
2696         dev->data->dev_link.link_status = link_up;
2697
2698         err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2699         if (err)
2700                 goto error;
2701
2702         switch (hw->mac.type) {
2703         case ixgbe_mac_X550:
2704         case ixgbe_mac_X550EM_x:
2705         case ixgbe_mac_X550EM_a:
2706                 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2707                         ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
2708                         ETH_LINK_SPEED_10G;
2709                 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
2710                                 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
2711                         allowed_speeds = ETH_LINK_SPEED_10M |
2712                                 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
2713                 break;
2714         default:
2715                 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2716                         ETH_LINK_SPEED_10G;
2717         }
2718
2719         link_speeds = &dev->data->dev_conf.link_speeds;
2720
2721         /* Ignore the autoneg flag bit and check the validity of
2722          * link_speed.
2723          */
2724         if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
2725                 PMD_INIT_LOG(ERR, "Invalid link setting");
2726                 goto error;
2727         }
2728
2729         speed = 0x0;
2730         if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2731                 switch (hw->mac.type) {
2732                 case ixgbe_mac_82598EB:
2733                         speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2734                         break;
2735                 case ixgbe_mac_82599EB:
2736                 case ixgbe_mac_X540:
2737                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2738                         break;
2739                 case ixgbe_mac_X550:
2740                 case ixgbe_mac_X550EM_x:
2741                 case ixgbe_mac_X550EM_a:
2742                         speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2743                         break;
2744                 default:
2745                         speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2746                 }
2747         } else {
2748                 if (*link_speeds & ETH_LINK_SPEED_10G)
2749                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2750                 if (*link_speeds & ETH_LINK_SPEED_5G)
2751                         speed |= IXGBE_LINK_SPEED_5GB_FULL;
2752                 if (*link_speeds & ETH_LINK_SPEED_2_5G)
2753                         speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2754                 if (*link_speeds & ETH_LINK_SPEED_1G)
2755                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2756                 if (*link_speeds & ETH_LINK_SPEED_100M)
2757                         speed |= IXGBE_LINK_SPEED_100_FULL;
2758                 if (*link_speeds & ETH_LINK_SPEED_10M)
2759                         speed |= IXGBE_LINK_SPEED_10_FULL;
2760         }
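        /*
         * Example: with dev_conf.link_speeds = ETH_LINK_SPEED_1G |
         * ETH_LINK_SPEED_10G (fixed-speed bits, autoneg bit clear), the
         * branch above yields speed = IXGBE_LINK_SPEED_1GB_FULL |
         * IXGBE_LINK_SPEED_10GB_FULL for ixgbe_setup_link() below.
         */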
2761
2762         err = ixgbe_setup_link(hw, speed, link_up);
2763         if (err)
2764                 goto error;
2765
2766 skip_link_setup:
2767
2768         if (rte_intr_allow_others(intr_handle)) {
2769                 /* check if lsc interrupt is enabled */
2770                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2771                         ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2772                 else
2773                         ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2774                 ixgbe_dev_macsec_interrupt_setup(dev);
2775         } else {
2776                 rte_intr_callback_unregister(intr_handle,
2777                                              ixgbe_dev_interrupt_handler, dev);
2778                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2779                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
2780                                      " there is no intr multiplex");
2781         }
2782
2783         /* check if rxq interrupt is enabled */
2784         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2785             rte_intr_dp_is_en(intr_handle))
2786                 ixgbe_dev_rxq_interrupt_setup(dev);
2787
2788         /* enable uio/vfio intr/eventfd mapping */
2789         rte_intr_enable(intr_handle);
2790
2791         /* resume enabled intr since hw reset */
2792         ixgbe_enable_intr(dev);
2793         ixgbe_l2_tunnel_conf(dev);
2794         ixgbe_filter_restore(dev);
2795
2796         if (tm_conf->root && !tm_conf->committed)
2797                 PMD_DRV_LOG(WARNING,
2798                             "please call hierarchy_commit() "
2799                             "before starting the port");
2800
2801         /* wait for the controller to acquire link */
2802         err = ixgbe_wait_for_link_up(hw);
2803         if (err)
2804                 goto error;
2805
2806         /*
2807          * Update link status right before return, because it may
2808          * start link configuration process in a separate thread.
2809          */
2810         ixgbe_dev_link_update(dev, 0);
2811
2812         /* setup the macsec setting register */
2813         if (macsec_setting->offload_en)
2814                 ixgbe_dev_macsec_register_enable(dev, macsec_setting);
2815
2816         return 0;
2817
2818 error:
2819         PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2820         ixgbe_dev_clear_queues(dev);
2821         return -EIO;
2822 }
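/*
 * Example: the usual application sequence that ends in this dev_start op.
 * A sketch with hypothetical port id, descriptor counts and mempool;
 * error handling omitted.
 *
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);
 *     rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mb_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *     rte_eth_dev_start(port_id);   // invokes ixgbe_dev_start()
 */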
2823
2824 /*
2825  * Stop device: disable rx and tx functions to allow for reconfiguring.
2826  */
2827 static void
2828 ixgbe_dev_stop(struct rte_eth_dev *dev)
2829 {
2830         struct rte_eth_link link;
2831         struct ixgbe_adapter *adapter = dev->data->dev_private;
2832         struct ixgbe_hw *hw =
2833                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2834         struct ixgbe_vf_info *vfinfo =
2835                 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2836         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2837         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2838         int vf;
2839         struct ixgbe_tm_conf *tm_conf =
2840                 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2841
2842         if (hw->adapter_stopped)
2843                 return;
2844
2845         PMD_INIT_FUNC_TRACE();
2846
2847         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
2848
2849         /* disable interrupts */
2850         ixgbe_disable_intr(hw);
2851
2852         /* reset the NIC */
2853         ixgbe_pf_reset_hw(hw);
2854         hw->adapter_stopped = 0;
2855
2856         /* stop adapter */
2857         ixgbe_stop_adapter(hw);
2858
2859         for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2860                 vfinfo[vf].clear_to_send = false;
2861
2862         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2863                 /* Turn off the copper */
2864                 ixgbe_set_phy_power(hw, false);
2865         } else {
2866                 /* Turn off the laser */
2867                 ixgbe_disable_tx_laser(hw);
2868         }
2869
2870         ixgbe_dev_clear_queues(dev);
2871
2872         /* Clear stored conf */
2873         dev->data->scattered_rx = 0;
2874         dev->data->lro = 0;
2875
2876         /* Clear recorded link status */
2877         memset(&link, 0, sizeof(link));
2878         rte_eth_linkstatus_set(dev, &link);
2879
2880         if (!rte_intr_allow_others(intr_handle))
2881                 /* resume to the default handler */
2882                 rte_intr_callback_register(intr_handle,
2883                                            ixgbe_dev_interrupt_handler,
2884                                            (void *)dev);
2885
2886         /* Clean datapath event and queue/vec mapping */
2887         rte_intr_efd_disable(intr_handle);
2888         if (intr_handle->intr_vec != NULL) {
2889                 rte_free(intr_handle->intr_vec);
2890                 intr_handle->intr_vec = NULL;
2891         }
2892
2893         /* reset hierarchy commit */
2894         tm_conf->committed = false;
2895
2896         adapter->rss_reta_updated = 0;
2897
2898         hw->adapter_stopped = true;
2899 }
2900
2901 /*
2902  * Set device link up: enable tx.
2903  */
2904 static int
2905 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2906 {
2907         struct ixgbe_hw *hw =
2908                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2909         if (hw->mac.type == ixgbe_mac_82599EB) {
2910 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2911                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2912                         /* Not supported in bypass mode */
2913                         PMD_INIT_LOG(ERR, "Set link up is not supported "
2914                                      "by device id 0x%x", hw->device_id);
2915                         return -ENOTSUP;
2916                 }
2917 #endif
2918         }
2919
2920         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2921                 /* Turn on the copper */
2922                 ixgbe_set_phy_power(hw, true);
2923         } else {
2924                 /* Turn on the laser */
2925                 ixgbe_enable_tx_laser(hw);
2926                 ixgbe_dev_link_update(dev, 0);
2927         }
2928
2929         return 0;
2930 }
2931
2932 /*
2933  * Set device link down: disable tx.
2934  */
2935 static int
2936 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2937 {
2938         struct ixgbe_hw *hw =
2939                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2940         if (hw->mac.type == ixgbe_mac_82599EB) {
2941 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2942                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2943                         /* Not supported in bypass mode */
2944                         PMD_INIT_LOG(ERR, "Set link down is not supported "
2945                                      "by device id 0x%x", hw->device_id);
2946                         return -ENOTSUP;
2947                 }
2948 #endif
2949         }
2950
2951         if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2952                 /* Turn off the copper */
2953                 ixgbe_set_phy_power(hw, false);
2954         } else {
2955                 /* Turn off the laser */
2956                 ixgbe_disable_tx_laser(hw);
2957                 ixgbe_dev_link_update(dev, 0);
2958         }
2959
2960         return 0;
2961 }
2962
2963 /*
2964  * Reset and stop device.
2965  */
2966 static void
2967 ixgbe_dev_close(struct rte_eth_dev *dev)
2968 {
2969         struct ixgbe_hw *hw =
2970                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2971         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2972         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2973         int retries = 0;
2974         int ret;
2975
2976         PMD_INIT_FUNC_TRACE();
2977
2978         ixgbe_pf_reset_hw(hw);
2979
2980         ixgbe_dev_stop(dev);
2981
2982         ixgbe_dev_free_queues(dev);
2983
2984         ixgbe_disable_pcie_master(hw);
2985
2986         /* reprogram the RAR[0] in case user changed it. */
2987         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2988
2989         dev->dev_ops = NULL;
2990         dev->rx_pkt_burst = NULL;
2991         dev->tx_pkt_burst = NULL;
2992
2993         /* Unlock any pending hardware semaphore */
2994         ixgbe_swfw_lock_reset(hw);
2995
2996         /* disable uio intr before callback unregister */
2997         rte_intr_disable(intr_handle);
2998
2999         do {
3000                 ret = rte_intr_callback_unregister(intr_handle,
3001                                 ixgbe_dev_interrupt_handler, dev);
3002                 if (ret >= 0 || ret == -ENOENT) {
3003                         break;
3004                 } else if (ret != -EAGAIN) {
3005                         PMD_INIT_LOG(ERR,
3006                                 "intr callback unregister failed: %d",
3007                                 ret);
3008                 }
3009                 rte_delay_ms(100);
3010         } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
3011
3012         /* cancel the delayed handler before removing the dev */
3013         rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev);
3014
3015         /* uninitialize PF if max_vfs not zero */
3016         ixgbe_pf_host_uninit(dev);
3017
3018         /* remove all the fdir filters & hash */
3019         ixgbe_fdir_filter_uninit(dev);
3020
3021         /* remove all the L2 tunnel filters & hash */
3022         ixgbe_l2_tn_filter_uninit(dev);
3023
3024         /* Remove all ntuple filters of the device */
3025         ixgbe_ntuple_filter_uninit(dev);
3026
3027         /* clear all the filters list */
3028         ixgbe_filterlist_flush();
3029
3030         /* Remove all Traffic Manager configuration */
3031         ixgbe_tm_conf_uninit(dev);
3032
3033 #ifdef RTE_LIBRTE_SECURITY
3034         rte_free(dev->security_ctx);
3035 #endif
3036
3037 }
3038
3039 /*
3040  * Reset PF device.
3041  */
3042 static int
3043 ixgbe_dev_reset(struct rte_eth_dev *dev)
3044 {
3045         int ret;
3046
3047         /* When a DPDK PMD PF begins to reset a PF port, it should notify
3048          * all its VFs so that they stay aligned with it. The detailed
3049          * notification mechanism is PMD specific; for the ixgbe PF it is
3050          * rather complex. To avoid unexpected VF behavior, reset of a PF
3051          * with SR-IOV active is currently not supported; this may change later.
3052          */
3053         if (dev->data->sriov.active)
3054                 return -ENOTSUP;
3055
3056         ret = eth_ixgbe_dev_uninit(dev);
3057         if (ret)
3058                 return ret;
3059
3060         ret = eth_ixgbe_dev_init(dev, NULL);
3061
3062         return ret;
3063 }
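/*
 * Example: a sketch of how an application typically drives this reset op
 * after a RTE_ETH_EVENT_INTR_RESET notification; the port id is
 * hypothetical and error handling is omitted. Note the -ENOTSUP above
 * when SR-IOV is active.
 *
 *     if (rte_eth_dev_reset(port_id) == 0) {
 *             rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *             // ... re-setup queues, then rte_eth_dev_start(port_id)
 *     }
 */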
3064
3065 static void
3066 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
3067                            struct ixgbe_hw_stats *hw_stats,
3068                            struct ixgbe_macsec_stats *macsec_stats,
3069                            uint64_t *total_missed_rx, uint64_t *total_qbrc,
3070                            uint64_t *total_qprc, uint64_t *total_qprdc)
3071 {
3072         uint32_t bprc, lxon, lxoff, total;
3073         uint32_t delta_gprc = 0;
3074         unsigned i;
3075         /* Workaround for RX byte count not including CRC bytes when CRC
3076          * strip is enabled. CRC bytes are removed from counters when crc_strip
3077          * is disabled.
3078          */
3079         int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
3080                         IXGBE_HLREG0_RXCRCSTRP);
3081
3082         hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3083         hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3084         hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3085         hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3086
3087         for (i = 0; i < 8; i++) {
3088                 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3089
3090                 /* global total per queue */
3091                 hw_stats->mpc[i] += mp;
3092                 /* Running comprehensive total for stats display */
3093                 *total_missed_rx += hw_stats->mpc[i];
3094                 if (hw->mac.type == ixgbe_mac_82598EB) {
3095                         hw_stats->rnbc[i] +=
3096                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3097                         hw_stats->pxonrxc[i] +=
3098                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
3099                         hw_stats->pxoffrxc[i] +=
3100                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
3101                 } else {
3102                         hw_stats->pxonrxc[i] +=
3103                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
3104                         hw_stats->pxoffrxc[i] +=
3105                                 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
3106                         hw_stats->pxon2offc[i] +=
3107                                 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
3108                 }
3109                 hw_stats->pxontxc[i] +=
3110                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
3111                 hw_stats->pxofftxc[i] +=
3112                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
3113         }
3114         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3115                 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3116                 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3117                 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3118
3119                 delta_gprc += delta_qprc;
3120
3121                 hw_stats->qprc[i] += delta_qprc;
3122                 hw_stats->qptc[i] += delta_qptc;
3123
3124                 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
3125                 hw_stats->qbrc[i] +=
3126                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
3127                 if (crc_strip == 0)
3128                         hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
3129
3130                 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
3131                 hw_stats->qbtc[i] +=
3132                     ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
3133
3134                 hw_stats->qprdc[i] += delta_qprdc;
3135                 *total_qprdc += hw_stats->qprdc[i];
3136
3137                 *total_qprc += hw_stats->qprc[i];
3138                 *total_qbrc += hw_stats->qbrc[i];
3139         }
3140         hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3141         hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3142         hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3143
3144         /*
3145          * An errata states that gprc actually counts good + missed packets:
3146          * Workaround: set gprc to the sum of the per-queue packet receives.
3147          */
3148         hw_stats->gprc = *total_qprc;
3149
3150         if (hw->mac.type != ixgbe_mac_82598EB) {
3151                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3152                 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3153                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3154                 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3155                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3156                 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3157                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3158                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3159         } else {
3160                 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3161                 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3162                 /* 82598 only has a counter in the high register */
3163                 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3164                 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3165                 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3166         }
3167         uint64_t old_tpr = hw_stats->tpr;
3168
3169         hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3170         hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3171
3172         if (crc_strip == 0)
3173                 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
3174
3175         uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3176         hw_stats->gptc += delta_gptc;
3177         hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
3178         hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
3179
3180         /*
3181          * Workaround: mprc hardware is incorrectly counting
3182          * broadcasts, so for now we subtract those.
3183          */
3184         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3185         hw_stats->bprc += bprc;
3186         hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3187         if (hw->mac.type == ixgbe_mac_82598EB)
3188                 hw_stats->mprc -= bprc;
3189
3190         hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3191         hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3192         hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3193         hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3194         hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3195         hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3196
3197         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3198         hw_stats->lxontxc += lxon;
3199         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3200         hw_stats->lxofftxc += lxoff;
3201         total = lxon + lxoff;
3202
3203         hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3204         hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3205         hw_stats->gptc -= total;
3206         hw_stats->mptc -= total;
3207         hw_stats->ptc64 -= total;
3208         hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
3209
3210         hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3211         hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3212         hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3213         hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3214         hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3215         hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3216         hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3217         hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3218         hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3219         hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3220         hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3221         hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3222         hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3223         hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3224         hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3225         hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3226         /* Only read FCOE on 82599 */
3227         if (hw->mac.type != ixgbe_mac_82598EB) {
3228                 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3229                 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3230                 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3231                 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3232                 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3233         }
3234
3235         /* Flow Director Stats registers */
3236         if (hw->mac.type != ixgbe_mac_82598EB) {
3237                 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3238                 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3239                 hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
3240                                         IXGBE_FDIRUSTAT) & 0xFFFF;
3241                 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
3242                                         IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
3243                 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
3244                                         IXGBE_FDIRFSTAT) & 0xFFFF;
3245                 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
3246                                         IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
3247         }
3248         /* MACsec Stats registers */
3249         macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3250         macsec_stats->out_pkts_encrypted +=
3251                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3252         macsec_stats->out_pkts_protected +=
3253                 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3254         macsec_stats->out_octets_encrypted +=
3255                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3256         macsec_stats->out_octets_protected +=
3257                 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3258         macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3259         macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3260         macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3261         macsec_stats->in_pkts_unknownsci +=
3262                 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3263         macsec_stats->in_octets_decrypted +=
3264                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3265         macsec_stats->in_octets_validated +=
3266                 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3267         macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3268         macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3269         macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3270         for (i = 0; i < 2; i++) {
3271                 macsec_stats->in_pkts_ok +=
3272                         IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3273                 macsec_stats->in_pkts_invalid +=
3274                         IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3275                 macsec_stats->in_pkts_notvalid +=
3276                         IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3277         }
3278         macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3279         macsec_stats->in_pkts_notusingsa +=
3280                 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3281 }
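/*
 * Example: the split-register accumulation pattern used above for the
 * 64-bit byte counters, shown in isolation for one queue i. The 32-bit
 * low and high halves are read and combined into the running software
 * total (the hardware stats registers clear on read).
 *
 *     uint64_t lo = IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
 *     uint64_t hi = IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
 *     hw_stats->qbrc[i] += lo | (hi << 32);
 */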
3282
3283 /*
3284  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3285  */
3286 static int
3287 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3288 {
3289         struct ixgbe_hw *hw =
3290                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3291         struct ixgbe_hw_stats *hw_stats =
3292                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3293         struct ixgbe_macsec_stats *macsec_stats =
3294                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3295                                 dev->data->dev_private);
3296         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3297         unsigned i;
3298
3299         total_missed_rx = 0;
3300         total_qbrc = 0;
3301         total_qprc = 0;
3302         total_qprdc = 0;
3303
3304         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3305                         &total_qbrc, &total_qprc, &total_qprdc);
3306
3307         if (stats == NULL)
3308                 return -EINVAL;
3309
3310         /* Fill out the rte_eth_stats statistics structure */
3311         stats->ipackets = total_qprc;
3312         stats->ibytes = total_qbrc;
3313         stats->opackets = hw_stats->gptc;
3314         stats->obytes = hw_stats->gotc;
3315
3316         for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3317                 stats->q_ipackets[i] = hw_stats->qprc[i];
3318                 stats->q_opackets[i] = hw_stats->qptc[i];
3319                 stats->q_ibytes[i] = hw_stats->qbrc[i];
3320                 stats->q_obytes[i] = hw_stats->qbtc[i];
3321                 stats->q_errors[i] = hw_stats->qprdc[i];
3322         }
3323
3324         /* Rx Errors */
3325         stats->imissed  = total_missed_rx;
3326         stats->ierrors  = hw_stats->crcerrs +
3327                           hw_stats->mspdc +
3328                           hw_stats->rlec +
3329                           hw_stats->ruc +
3330                           hw_stats->roc +
3331                           hw_stats->illerrc +
3332                           hw_stats->errbc +
3333                           hw_stats->rfc +
3334                           hw_stats->fccrc +
3335                           hw_stats->fclast;
3336
3337         /* Tx Errors */
3338         stats->oerrors  = 0;
3339         return 0;
3340 }
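/*
 * Example: a sketch of the application-side read that lands here via the
 * stats_get op; the port id is hypothetical.
 *
 *     struct rte_eth_stats st;
 *     if (rte_eth_stats_get(port_id, &st) == 0)
 *             printf("rx %"PRIu64" pkts, %"PRIu64" missed\n",
 *                    st.ipackets, st.imissed);
 */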
3341
3342 static int
3343 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3344 {
3345         struct ixgbe_hw_stats *stats =
3346                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3347
3348         /* HW registers are cleared on read */
3349         ixgbe_dev_stats_get(dev, NULL);
3350
3351         /* Reset software totals */
3352         memset(stats, 0, sizeof(*stats));
3353
3354         return 0;
3355 }
3356
3357 /* This function calculates the number of xstats based on the current config */
3358 static unsigned
3359 ixgbe_xstats_calc_num(void) {
3360         return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3361                 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3362                 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3363 }
3364
3365 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3366         struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3367 {
3368         const unsigned cnt_stats = ixgbe_xstats_calc_num();
3369         unsigned stat, i, count;
3370
3371         if (xstats_names != NULL) {
3372                 count = 0;
3373
3374                 /* Note: limit >= cnt_stats checked upstream
3375                  * in rte_eth_xstats_get_names()
3376                  */
3377
3378                 /* Extended stats from ixgbe_hw_stats */
3379                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3380                         strlcpy(xstats_names[count].name,
3381                                 rte_ixgbe_stats_strings[i].name,
3382                                 sizeof(xstats_names[count].name));
3383                         count++;
3384                 }
3385
3386                 /* MACsec Stats */
3387                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3388                         strlcpy(xstats_names[count].name,
3389                                 rte_ixgbe_macsec_strings[i].name,
3390                                 sizeof(xstats_names[count].name));
3391                         count++;
3392                 }
3393
3394                 /* RX Priority Stats */
3395                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3396                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3397                                 snprintf(xstats_names[count].name,
3398                                         sizeof(xstats_names[count].name),
3399                                         "rx_priority%u_%s", i,
3400                                         rte_ixgbe_rxq_strings[stat].name);
3401                                 count++;
3402                         }
3403                 }
3404
3405                 /* TX Priority Stats */
3406                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3407                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3408                                 snprintf(xstats_names[count].name,
3409                                         sizeof(xstats_names[count].name),
3410                                         "tx_priority%u_%s", i,
3411                                         rte_ixgbe_txq_strings[stat].name);
3412                                 count++;
3413                         }
3414                 }
3415         }
3416         return cnt_stats;
3417 }
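/*
 * Example: the two-call pattern applications use with this op. Calling
 * with a NULL array first returns the required count (cnt_stats above),
 * then the sized array is filled. A sketch, error handling omitted:
 *
 *     int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *     struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *     rte_eth_xstats_get_names(port_id, names, n);
 */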
3418
3419 static int ixgbe_dev_xstats_get_names_by_id(
3420         struct rte_eth_dev *dev,
3421         struct rte_eth_xstat_name *xstats_names,
3422         const uint64_t *ids,
3423         unsigned int limit)
3424 {
3425         if (!ids) {
3426                 const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3427                 unsigned int stat, i, count;
3428
3429                 if (xstats_names != NULL) {
3430                         count = 0;
3431
3432                         /* Note: limit >= cnt_stats checked upstream
3433                          * in rte_eth_xstats_get_names()
3434                          */
3435
3436                         /* Extended stats from ixgbe_hw_stats */
3437                         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3438                                 strlcpy(xstats_names[count].name,
3439                                         rte_ixgbe_stats_strings[i].name,
3440                                         sizeof(xstats_names[count].name));
3441                                 count++;
3442                         }
3443
3444                         /* MACsec Stats */
3445                         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3446                                 strlcpy(xstats_names[count].name,
3447                                         rte_ixgbe_macsec_strings[i].name,
3448                                         sizeof(xstats_names[count].name));
3449                                 count++;
3450                         }
3451
3452                         /* RX Priority Stats */
3453                         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3454                                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3455                                         snprintf(xstats_names[count].name,
3456                                             sizeof(xstats_names[count].name),
3457                                             "rx_priority%u_%s", i,
3458                                             rte_ixgbe_rxq_strings[stat].name);
3459                                         count++;
3460                                 }
3461                         }
3462
3463                         /* TX Priority Stats */
3464                         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3465                                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3466                                         snprintf(xstats_names[count].name,
3467                                             sizeof(xstats_names[count].name),
3468                                             "tx_priority%u_%s", i,
3469                                             rte_ixgbe_txq_strings[stat].name);
3470                                         count++;
3471                                 }
3472                         }
3473                 }
3474                 return cnt_stats;
3475         }
3476
3477         uint16_t i;
3478         uint16_t size = ixgbe_xstats_calc_num();
3479         struct rte_eth_xstat_name xstats_names_copy[size];
3480
3481         ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3482                         size);
3483
3484         for (i = 0; i < limit; i++) {
3485                 if (ids[i] >= size) {
3486                         PMD_INIT_LOG(ERR, "id value isn't valid");
3487                         return -1;
3488                 }
3489                 strcpy(xstats_names[i].name,
3490                                 xstats_names_copy[ids[i]].name);
3491         }
3492         return limit;
3493 }
3494
3495 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3496         struct rte_eth_xstat_name *xstats_names, unsigned limit)
3497 {
3498         unsigned i;
3499
3500         if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3501                 return -ENOMEM;
3502
3503         if (xstats_names != NULL)
3504                 for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3505                         strlcpy(xstats_names[i].name,
3506                                 rte_ixgbevf_stats_strings[i].name,
3507                                 sizeof(xstats_names[i].name));
3508         return IXGBEVF_NB_XSTATS;
3509 }
3510
3511 static int
3512 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3513                                          unsigned n)
3514 {
3515         struct ixgbe_hw *hw =
3516                         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3517         struct ixgbe_hw_stats *hw_stats =
3518                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3519         struct ixgbe_macsec_stats *macsec_stats =
3520                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3521                                 dev->data->dev_private);
3522         uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3523         unsigned i, stat, count = 0;
3524
3525         count = ixgbe_xstats_calc_num();
3526
3527         if (n < count)
3528                 return count;
3529
3530         total_missed_rx = 0;
3531         total_qbrc = 0;
3532         total_qprc = 0;
3533         total_qprdc = 0;
3534
3535         ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3536                         &total_qbrc, &total_qprc, &total_qprdc);
3537
3538         /* If this is a reset, xstats is NULL, and we have cleared the
3539          * registers by reading them.
3540          */
3541         if (!xstats)
3542                 return 0;
3543
3544         /* Extended stats from ixgbe_hw_stats */
3545         count = 0;
3546         for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3547                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3548                                 rte_ixgbe_stats_strings[i].offset);
3549                 xstats[count].id = count;
3550                 count++;
3551         }
3552
3553         /* MACsec Stats */
3554         for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3555                 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3556                                 rte_ixgbe_macsec_strings[i].offset);
3557                 xstats[count].id = count;
3558                 count++;
3559         }
3560
3561         /* RX Priority Stats */
3562         for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3563                 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3564                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3565                                         rte_ixgbe_rxq_strings[stat].offset +
3566                                         (sizeof(uint64_t) * i));
3567                         xstats[count].id = count;
3568                         count++;
3569                 }
3570         }
3571
3572         /* TX Priority Stats */
3573         for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3574                 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3575                         xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3576                                         rte_ixgbe_txq_strings[stat].offset +
3577                                         (sizeof(uint64_t) * i));
3578                         xstats[count].id = count;
3579                         count++;
3580                 }
3581         }
3582         return count;
3583 }
3584
3585 static int
3586 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3587                 uint64_t *values, unsigned int n)
3588 {
3589         if (!ids) {
3590                 struct ixgbe_hw *hw =
3591                                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3592                 struct ixgbe_hw_stats *hw_stats =
3593                                 IXGBE_DEV_PRIVATE_TO_STATS(
3594                                                 dev->data->dev_private);
3595                 struct ixgbe_macsec_stats *macsec_stats =
3596                                 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3597                                         dev->data->dev_private);
3598                 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3599                 unsigned int i, stat, count = 0;
3600
3601                 count = ixgbe_xstats_calc_num();
3602
3603                 if (!ids && n < count)
3604                         return count;
3605
3606                 total_missed_rx = 0;
3607                 total_qbrc = 0;
3608                 total_qprc = 0;
3609                 total_qprdc = 0;
3610
3611                 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3612                                 &total_missed_rx, &total_qbrc, &total_qprc,
3613                                 &total_qprdc);
3614
3615                 /* If this is a reset, xstats is NULL, and we have cleared the
3616                  * registers by reading them.
3617                  */
3618                 if (!ids && !values)
3619                         return 0;
3620
3621                 /* Extended stats from ixgbe_hw_stats */
3622                 count = 0;
3623                 for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3624                         values[count] = *(uint64_t *)(((char *)hw_stats) +
3625                                         rte_ixgbe_stats_strings[i].offset);
3626                         count++;
3627                 }
3628
3629                 /* MACsec Stats */
3630                 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3631                         values[count] = *(uint64_t *)(((char *)macsec_stats) +
3632                                         rte_ixgbe_macsec_strings[i].offset);
3633                         count++;
3634                 }
3635
3636                 /* RX Priority Stats */
3637                 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3638                         for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3639                                 values[count] =
3640                                         *(uint64_t *)(((char *)hw_stats) +
3641                                         rte_ixgbe_rxq_strings[stat].offset +
3642                                         (sizeof(uint64_t) * i));
3643                                 count++;
3644                         }
3645                 }
3646
3647                 /* TX Priority Stats */
3648                 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3649                         for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3650                                 values[count] =
3651                                         *(uint64_t *)(((char *)hw_stats) +
3652                                         rte_ixgbe_txq_strings[stat].offset +
3653                                         (sizeof(uint64_t) * i));
3654                                 count++;
3655                         }
3656                 }
3657                 return count;
3658         }
3659
3660         uint16_t i;
3661         uint16_t size = ixgbe_xstats_calc_num();
3662         uint64_t values_copy[size];
3663
3664         ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3665
3666         for (i = 0; i < n; i++) {
3667                 if (ids[i] >= size) {
3668                         PMD_INIT_LOG(ERR, "id value isn't valid");
3669                         return -1;
3670                 }
3671                 values[i] = values_copy[ids[i]];
3672         }
3673         return n;
3674 }
3675
3676 static int
3677 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3678 {
3679         struct ixgbe_hw_stats *stats =
3680                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3681         struct ixgbe_macsec_stats *macsec_stats =
3682                         IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3683                                 dev->data->dev_private);
3684
3685         unsigned count = ixgbe_xstats_calc_num();
3686
3687         /* HW registers are cleared on read */
3688         ixgbe_dev_xstats_get(dev, NULL, count);
3689
3690         /* Reset software totals */
3691         memset(stats, 0, sizeof(*stats));
3692         memset(macsec_stats, 0, sizeof(*macsec_stats));
3693
3694         return 0;
3695 }
3696
3697 static void
3698 ixgbevf_update_stats(struct rte_eth_dev *dev)
3699 {
3700         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3701         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3702                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3703
3704         /* Good Rx packet, include VF loopback */
3705         UPDATE_VF_STAT(IXGBE_VFGPRC,
3706             hw_stats->last_vfgprc, hw_stats->vfgprc);
3707
3708         /* Good Rx octets, include VF loopback */
3709         UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3710             hw_stats->last_vfgorc, hw_stats->vfgorc);
3711
3712         /* Good Tx packet, include VF loopback */
3713         UPDATE_VF_STAT(IXGBE_VFGPTC,
3714             hw_stats->last_vfgptc, hw_stats->vfgptc);
3715
3716         /* Good Tx octets, include VF loopback */
3717         UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3718             hw_stats->last_vfgotc, hw_stats->vfgotc);
3719
3720         /* Rx Multicast Packet */
3721         UPDATE_VF_STAT(IXGBE_VFMPRC,
3722             hw_stats->last_vfmprc, hw_stats->vfmprc);
3723 }
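/*
 * The UPDATE_VF_STAT macros (defined earlier in this driver) accumulate
 * the VF's free-running 32-bit counters into 64-bit software totals. A
 * sketch of the wraparound-safe delta such a macro computes, assuming a
 * 32-bit rollover counter:
 *
 *     uint32_t latest = IXGBE_READ_REG(hw, reg);
 *     cur += latest - last;   // unsigned 32-bit math absorbs wraparound
 *     last = latest;
 */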
3724
3725 static int
3726 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3727                        unsigned n)
3728 {
3729         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3730                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3731         unsigned i;
3732
3733         if (n < IXGBEVF_NB_XSTATS)
3734                 return IXGBEVF_NB_XSTATS;
3735
3736         ixgbevf_update_stats(dev);
3737
3738         if (!xstats)
3739                 return 0;
3740
3741         /* Extended stats */
3742         for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3743                 xstats[i].id = i;
3744                 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3745                         rte_ixgbevf_stats_strings[i].offset);
3746         }
3747
3748         return IXGBEVF_NB_XSTATS;
3749 }
3750
3751 static int
3752 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3753 {
3754         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3755                           IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3756
3757         ixgbevf_update_stats(dev);
3758
3759         if (stats == NULL)
3760                 return -EINVAL;
3761
3762         stats->ipackets = hw_stats->vfgprc;
3763         stats->ibytes = hw_stats->vfgorc;
3764         stats->opackets = hw_stats->vfgptc;
3765         stats->obytes = hw_stats->vfgotc;
3766         return 0;
3767 }
3768
3769 static int
3770 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3771 {
3772         struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3773                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3774
3775         /* Sync HW register to the last stats */
3776         ixgbevf_dev_stats_get(dev, NULL);
3777
3778         /* reset HW current stats */
3779         hw_stats->vfgprc = 0;
3780         hw_stats->vfgorc = 0;
3781         hw_stats->vfgptc = 0;
3782         hw_stats->vfgotc = 0;
3783
3784         return 0;
3785 }
3786
3787 static int
3788 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3789 {
3790         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3791         u16 eeprom_verh, eeprom_verl;
3792         u32 etrack_id;
3793         int ret;
3794
3795         ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3796         ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3797
3798         etrack_id = (eeprom_verh << 16) | eeprom_verl;
3799         ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3800
3801         ret += 1; /* account for the terminating '\0' */
3802         if (fw_size < (u32)ret)
3803                 return ret;
3804         else
3805                 return 0;
3806 }
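/*
 * Example: with hypothetical EEPROM words eeprom_verh = 0x8000 and
 * eeprom_verl = 0x0001, etrack_id = 0x80000001 and the formatted string
 * "0x80000001" needs 11 bytes including '\0'; snprintf() returns 10, so
 * ret becomes 11 and a caller passing fw_size < 11 gets 11 back.
 */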
3807
3808 static int
3809 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3810 {
3811         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3812         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3813         struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3814
3815         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3816         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3817         if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3818                 /*
3819                  * When DCB/VT is off, maximum number of queues changes,
3820                  * except for 82598EB, which remains constant.
3821                  */
3822                 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3823                                 hw->mac.type != ixgbe_mac_82598EB)
3824                         dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3825         }
3826         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3827         dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3828         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3829         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3830         dev_info->max_vfs = pci_dev->max_vfs;
3831         if (hw->mac.type == ixgbe_mac_82598EB)
3832                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3833         else
3834                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3835         dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
3836         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3837         dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3838         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3839         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3840                                      dev_info->rx_queue_offload_capa);
3841         dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3842         dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3843
3844         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3845                 .rx_thresh = {
3846                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3847                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3848                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3849                 },
3850                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3851                 .rx_drop_en = 0,
3852                 .offloads = 0,
3853         };
3854
3855         dev_info->default_txconf = (struct rte_eth_txconf) {
3856                 .tx_thresh = {
3857                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3858                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3859                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3860                 },
3861                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3862                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3863                 .offloads = 0,
3864         };
3865
3866         dev_info->rx_desc_lim = rx_desc_lim;
3867         dev_info->tx_desc_lim = tx_desc_lim;
3868
3869         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3870         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3871         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3872
3873         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3874         if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
3875                         hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
3876                 dev_info->speed_capa = ETH_LINK_SPEED_10M |
3877                         ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
3878
3879         if (hw->mac.type == ixgbe_mac_X540 ||
3880             hw->mac.type == ixgbe_mac_X540_vf ||
3881             hw->mac.type == ixgbe_mac_X550 ||
3882             hw->mac.type == ixgbe_mac_X550_vf) {
3883                 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3884         }
3885         if (hw->mac.type == ixgbe_mac_X550) {
3886                 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3887                 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3888         }
3889
3890         /* Driver-preferred Rx/Tx parameters */
3891         dev_info->default_rxportconf.burst_size = 32;
3892         dev_info->default_txportconf.burst_size = 32;
3893         dev_info->default_rxportconf.nb_queues = 1;
3894         dev_info->default_txportconf.nb_queues = 1;
3895         dev_info->default_rxportconf.ring_size = 256;
3896         dev_info->default_txportconf.ring_size = 256;
3897
3898         return 0;
3899 }
3900
3901 static const uint32_t *
3902 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3903 {
3904         static const uint32_t ptypes[] = {
3905                 /* For non-vec functions,
3906                  * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3907                  * for vec functions,
3908                  * refers to _recv_raw_pkts_vec().
3909                  */
3910                 RTE_PTYPE_L2_ETHER,
3911                 RTE_PTYPE_L3_IPV4,
3912                 RTE_PTYPE_L3_IPV4_EXT,
3913                 RTE_PTYPE_L3_IPV6,
3914                 RTE_PTYPE_L3_IPV6_EXT,
3915                 RTE_PTYPE_L4_SCTP,
3916                 RTE_PTYPE_L4_TCP,
3917                 RTE_PTYPE_L4_UDP,
3918                 RTE_PTYPE_TUNNEL_IP,
3919                 RTE_PTYPE_INNER_L3_IPV6,
3920                 RTE_PTYPE_INNER_L3_IPV6_EXT,
3921                 RTE_PTYPE_INNER_L4_TCP,
3922                 RTE_PTYPE_INNER_L4_UDP,
3923                 RTE_PTYPE_UNKNOWN
3924         };
3925
3926         if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3927             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3928             dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3929             dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3930                 return ptypes;
3931
3932 #if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON)
3933         if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3934             dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3935                 return ptypes;
3936 #endif
3937         return NULL;
3938 }
3939
3940 static int
3941 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3942                      struct rte_eth_dev_info *dev_info)
3943 {
3944         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3945         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3946
3947         dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3948         dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3949         dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3950         dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3951         dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
3952         dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3953         dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3954         dev_info->max_vfs = pci_dev->max_vfs;
3955         if (hw->mac.type == ixgbe_mac_82598EB)
3956                 dev_info->max_vmdq_pools = ETH_16_POOLS;
3957         else
3958                 dev_info->max_vmdq_pools = ETH_64_POOLS;
3959         dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3960         dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3961                                      dev_info->rx_queue_offload_capa);
3962         dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3963         dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3964         dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3965         dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3966         dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3967
3968         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3969                 .rx_thresh = {
3970                         .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3971                         .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3972                         .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3973                 },
3974                 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3975                 .rx_drop_en = 0,
3976                 .offloads = 0,
3977         };
3978
3979         dev_info->default_txconf = (struct rte_eth_txconf) {
3980                 .tx_thresh = {
3981                         .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3982                         .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3983                         .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3984                 },
3985                 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3986                 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3987                 .offloads = 0,
3988         };
3989
3990         dev_info->rx_desc_lim = rx_desc_lim;
3991         dev_info->tx_desc_lim = tx_desc_lim;
3992
3993         return 0;
3994 }
3995
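/*
 * VF link check. The VF cannot query the PHY directly: it first looks
 * at the VFLINKS register for the MAC-level link (optionally polling
 * for up to 500 usecs on 82599 to let SFP+/DA links settle), derives
 * the speed from the LINKS_SPEED field, and then, unless a quick check
 * is allowed, probes the PF mailbox, since only a CTS message from the
 * PF confirms that the link is actually usable.
 */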
3996 static int
3997 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3998                    bool *link_up, int wait_to_complete)
3999 {
4000         struct ixgbe_adapter *adapter = container_of(hw,
4001                                                      struct ixgbe_adapter, hw);
4002         struct ixgbe_mbx_info *mbx = &hw->mbx;
4003         struct ixgbe_mac_info *mac = &hw->mac;
4004         uint32_t links_reg, in_msg;
4005         int ret_val = 0;
4006
4007         /* If we were hit with a reset, drop the link */
4008         if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
4009                 mac->get_link_status = true;
4010
4011         if (!mac->get_link_status)
4012                 goto out;
4013
4014         /* If link status is down there is no point in checking whether the PF is up */
4015         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
4016         if (!(links_reg & IXGBE_LINKS_UP))
4017                 goto out;
4018
4019         /* For SFP+ modules and DA cables on 82599, it can take up to 500 usecs
4020          * before the link status is correct
4021          */
4022         if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
4023                 int i;
4024
4025                 for (i = 0; i < 5; i++) {
4026                         rte_delay_us(100);
4027                         links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
4028
4029                         if (!(links_reg & IXGBE_LINKS_UP))
4030                                 goto out;
4031                 }
4032         }
4033
4034         switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4035         case IXGBE_LINKS_SPEED_10G_82599:
4036                 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4037                 if (hw->mac.type >= ixgbe_mac_X550) {
4038                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4039                                 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4040                 }
4041                 break;
4042         case IXGBE_LINKS_SPEED_1G_82599:
4043                 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4044                 break;
4045         case IXGBE_LINKS_SPEED_100_82599:
4046                 *speed = IXGBE_LINK_SPEED_100_FULL;
4047                 if (hw->mac.type == ixgbe_mac_X550) {
4048                         if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4049                                 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4050                 }
4051                 break;
4052         case IXGBE_LINKS_SPEED_10_X550EM_A:
4053                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4054                 /* This value is reserved in older MACs */
4055                 if (hw->mac.type >= ixgbe_mac_X550)
4056                         *speed = IXGBE_LINK_SPEED_10_FULL;
4057                 break;
4058         default:
4059                 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4060         }
4061
4062         if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
4063                 if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
4064                         mac->get_link_status = true;
4065                 else
4066                         mac->get_link_status = false;
4067
4068                 goto out;
4069         }
4070
4071         /* If the read failed it could just be a mailbox collision; best to
4072          * wait until we are called again rather than report an error
4073          */
4074         if (mbx->ops.read(hw, &in_msg, 1, 0))
4075                 goto out;
4076
4077         if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
4078                 /* Msg is not CTS; if it is a NACK we must have lost CTS status */
4079                 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
4080                         mac->get_link_status = false;
4081                 goto out;
4082         }
4083
4084         /* The PF is talking; if we timed out in the past, reinitialize */
4085         if (!mbx->timeout) {
4086                 ret_val = -1;
4087                 goto out;
4088         }
4089
4090         /* if we passed all the tests above then the link is up and we no
4091          * longer need to check for link
4092          */
4093         mac->get_link_status = false;
4094
4095 out:
4096         *link_up = !mac->get_link_status;
4097         return ret_val;
4098 }
4099
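/*
 * Alarm callback that completes deferred link configuration: advertise
 * the configured speeds (or everything the PHY supports if none were
 * requested), set up the link, and clear the NEED_LINK_CONFIG flag so
 * that link_update proceeds normally again.
 */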
4100 static void
4101 ixgbe_dev_setup_link_alarm_handler(void *param)
4102 {
4103         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4104         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4105         struct ixgbe_interrupt *intr =
4106                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4107         u32 speed;
4108         bool autoneg = false;
4109
4110         speed = hw->phy.autoneg_advertised;
4111         if (!speed)
4112                 ixgbe_get_link_capabilities(hw, &speed, &autoneg);
4113
4114         ixgbe_setup_link(hw, speed, true);
4115
4116         intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4117 }
4118
4119 /*
4120  * In the FreeBSD environment, the nic_uio driver does not support
4121  * interrupts, so rte_intr_callback_register() fails to register them.
4122  * The link status can therefore not be moved from down to up via an
4123  * interrupt callback, so we must poll for the controller to acquire
4124  * link when ports start.
4125  * Returns 0 once the link is up, or after the polling gives up.
4126  */
4127 static int
4128 ixgbe_wait_for_link_up(struct ixgbe_hw *hw)
4129 {
4130 #ifdef RTE_EXEC_ENV_FREEBSD
4131         int err, i;
4132         bool link_up = false;
4133         uint32_t speed = 0;
4134         const int nb_iter = 25;
4135
4136         for (i = 0; i < nb_iter; i++) {
4137                 err = ixgbe_check_link(hw, &speed, &link_up, 0);
4138                 if (err)
4139                         return err;
4140                 if (link_up)
4141                         return 0;
4142                 msec_delay(200);
4143         }
4144
4145         return 0;
4146 #else
4147         RTE_SET_USED(hw);
4148         return 0;
4149 #endif
4150 }
4151
4152 /* Return 0 if the link status changed, -1 if it did not change */
4153 int
4154 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
4155                             int wait_to_complete, int vf)
4156 {
4157         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4158         struct rte_eth_link link;
4159         ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4160         struct ixgbe_interrupt *intr =
4161                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4162         bool link_up;
4163         int diag;
4164         int wait = 1;
4165         u32 esdp_reg;
4166
4167         memset(&link, 0, sizeof(link));
4168         link.link_status = ETH_LINK_DOWN;
4169         link.link_speed = ETH_SPEED_NUM_NONE;
4170         link.link_duplex = ETH_LINK_HALF_DUPLEX;
4171         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
4172                         ETH_LINK_SPEED_FIXED);
4173
4174         hw->mac.get_link_status = true;
4175
4176         if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
4177                 return rte_eth_linkstatus_set(dev, &link);
4178
4179         /* Skip the wait if the caller did not request it or if the LSC interrupt is enabled */
4180         if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
4181                 wait = 0;
4182
4183         if (vf)
4184                 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
4185         else
4186                 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
4187
4188         if (diag != 0) {
4189                 link.link_speed = ETH_SPEED_NUM_100M;
4190                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
4191                 return rte_eth_linkstatus_set(dev, &link);
4192         }
4193
4194         if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
4195                 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
4196                 if ((esdp_reg & IXGBE_ESDP_SDP3))
4197                         link_up = 0;
4198         }
4199
4200         if (link_up == 0) {
4201                 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
4202                         intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
4203                         rte_eal_alarm_set(10,
4204                                 ixgbe_dev_setup_link_alarm_handler, dev);
4205                 }
4206                 return rte_eth_linkstatus_set(dev, &link);
4207         }
4208
4209         link.link_status = ETH_LINK_UP;
4210         link.link_duplex = ETH_LINK_FULL_DUPLEX;
4211
4212         switch (link_speed) {
4213         default:
4214         case IXGBE_LINK_SPEED_UNKNOWN:
4215                 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4216                         hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4217                         link.link_speed = ETH_SPEED_NUM_10M;
4218                 else
4219                         link.link_speed = ETH_SPEED_NUM_100M;
4220                 break;
4221
4222         case IXGBE_LINK_SPEED_100_FULL:
4223                 link.link_speed = ETH_SPEED_NUM_100M;
4224                 break;
4225
4226         case IXGBE_LINK_SPEED_1GB_FULL:
4227                 link.link_speed = ETH_SPEED_NUM_1G;
4228                 break;
4229
4230         case IXGBE_LINK_SPEED_2_5GB_FULL:
4231                 link.link_speed = ETH_SPEED_NUM_2_5G;
4232                 break;
4233
4234         case IXGBE_LINK_SPEED_5GB_FULL:
4235                 link.link_speed = ETH_SPEED_NUM_5G;
4236                 break;
4237
4238         case IXGBE_LINK_SPEED_10GB_FULL:
4239                 link.link_speed = ETH_SPEED_NUM_10G;
4240                 break;
4241         }
4242
4243         return rte_eth_linkstatus_set(dev, &link);
4244 }
4245
4246 static int
4247 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4248 {
4249         return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4250 }
4251
4252 static int
4253 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4254 {
4255         return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4256 }
4257
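/*
 * Promiscuous and all-multicast modes are both implemented through the
 * FCTRL register bits UPE (unicast promiscuous) and MPE (multicast
 * promiscuous). Note the interaction handled below: leaving promiscuous
 * mode must keep MPE set while all_multicast is on, and all-multicast
 * cannot be cleared while promiscuous mode is still active.
 */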
4258 static int
4259 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4260 {
4261         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4262         uint32_t fctrl;
4263
4264         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4265         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4266         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4267
4268         return 0;
4269 }
4270
4271 static int
4272 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4273 {
4274         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4275         uint32_t fctrl;
4276
4277         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4278         fctrl &= (~IXGBE_FCTRL_UPE);
4279         if (dev->data->all_multicast == 1)
4280                 fctrl |= IXGBE_FCTRL_MPE;
4281         else
4282                 fctrl &= (~IXGBE_FCTRL_MPE);
4283         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4284
4285         return 0;
4286 }
4287
4288 static int
4289 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4290 {
4291         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4292         uint32_t fctrl;
4293
4294         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4295         fctrl |= IXGBE_FCTRL_MPE;
4296         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4297
4298         return 0;
4299 }
4300
4301 static int
4302 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4303 {
4304         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4305         uint32_t fctrl;
4306
4307         if (dev->data->promiscuous == 1)
4308                 return 0; /* must remain in all_multicast mode */
4309
4310         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4311         fctrl &= (~IXGBE_FCTRL_MPE);
4312         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4313
4314         return 0;
4315 }
4316
4317 /**
4318  * It enables or disables the LSC interrupt by updating the interrupt
4319  * mask. It is called only once during NIC initialization.
4320  *
4321  * @param dev
4322  *  Pointer to struct rte_eth_dev.
4323  * @param on
4324  *  Enable or Disable.
4325  *
4326  * @return
4327  *  - On success, zero.
4328  *  - On failure, a negative value.
4329  */
4330 static int
4331 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4332 {
4333         struct ixgbe_interrupt *intr =
4334                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4335
4336         ixgbe_dev_link_status_print(dev);
4337         if (on)
4338                 intr->mask |= IXGBE_EICR_LSC;
4339         else
4340                 intr->mask &= ~IXGBE_EICR_LSC;
4341
4342         return 0;
4343 }
4344
4345 /**
4346  * It enables the Rx queue interrupts by updating the interrupt mask.
4347  * It is called only once during NIC initialization.
4348  *
4349  * @param dev
4350  *  Pointer to struct rte_eth_dev.
4351  *
4352  * @return
4353  *  - On success, zero.
4354  *  - On failure, a negative value.
4355  */
4356 static int
4357 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4358 {
4359         struct ixgbe_interrupt *intr =
4360                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4361
4362         intr->mask |= IXGBE_EICR_RTX_QUEUE;
4363
4364         return 0;
4365 }
4366
4367 /**
4368  * It enables the MACsec interrupt by updating the interrupt mask.
4369  * It is called only once during NIC initialization.
4370  *
4371  * @param dev
4372  *  Pointer to struct rte_eth_dev.
4373  *
4374  * @return
4375  *  - On success, zero.
4376  *  - On failure, a negative value.
4377  */
4378 static int
4379 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4380 {
4381         struct ixgbe_interrupt *intr =
4382                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4383
4384         intr->mask |= IXGBE_EICR_LINKSEC;
4385
4386         return 0;
4387 }
4388
4389 /*
4390  * It reads the EICR register and sets flags (e.g. IXGBE_EICR_LSC) for link_update.
4391  *
4392  * @param dev
4393  *  Pointer to struct rte_eth_dev.
4394  *
4395  * @return
4396  *  - On success, zero.
4397  *  - On failure, a negative value.
4398  */
4399 static int
4400 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4401 {
4402         uint32_t eicr;
4403         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4404         struct ixgbe_interrupt *intr =
4405                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4406
4407         /* clear all cause mask */
4408         ixgbe_disable_intr(hw);
4409
4410         /* read-on-clear nic registers here */
4411         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4412         PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4413
4414         intr->flags = 0;
4415
4416         /* set flag for async link update */
4417         if (eicr & IXGBE_EICR_LSC)
4418                 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4419
4420         if (eicr & IXGBE_EICR_MAILBOX)
4421                 intr->flags |= IXGBE_FLAG_MAILBOX;
4422
4423         if (eicr & IXGBE_EICR_LINKSEC)
4424                 intr->flags |= IXGBE_FLAG_MACSEC;
4425
4426         if (hw->mac.type == ixgbe_mac_X550EM_x &&
4427             hw->phy.type == ixgbe_phy_x550em_ext_t &&
4428             (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4429                 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4430
4431         return 0;
4432 }
4433
4434 /**
4435  * It gets and then prints the link status.
4436  *
4437  * @param dev
4438  *  Pointer to struct rte_eth_dev.
4439  *
4440  * @return
4441  *  void
4443  */
4444 static void
4445 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4446 {
4447         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4448         struct rte_eth_link link;
4449
4450         rte_eth_linkstatus_get(dev, &link);
4451
4452         if (link.link_status) {
4453                 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4454                                         (int)(dev->data->port_id),
4455                                         (unsigned)link.link_speed,
4456                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4457                                         "full-duplex" : "half-duplex");
4458         } else {
4459                 PMD_INIT_LOG(INFO, " Port %d: Link Down",
4460                                 (int)(dev->data->port_id));
4461         }
4462         PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4463                                 pci_dev->addr.domain,
4464                                 pci_dev->addr.bus,
4465                                 pci_dev->addr.devid,
4466                                 pci_dev->addr.function);
4467 }
4468
4469 /*
4470  * It executes link_update after learning that an interrupt has occurred.
4471  *
4472  * @param dev
4473  *  Pointer to struct rte_eth_dev.
4474  *
4475  * @return
4476  *  - On success, zero.
4477  *  - On failure, a negative value.
4478  */
4479 static int
4480 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
4481 {
4482         struct ixgbe_interrupt *intr =
4483                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4484         int64_t timeout;
4485         struct ixgbe_hw *hw =
4486                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4487
4488         PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4489
4490         if (intr->flags & IXGBE_FLAG_MAILBOX) {
4491                 ixgbe_pf_mbx_process(dev);
4492                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4493         }
4494
4495         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4496                 ixgbe_handle_lasi(hw);
4497                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4498         }
4499
4500         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4501                 struct rte_eth_link link;
4502
4503                 /* Get the link status before the update, to predict the new state */
4504                 rte_eth_linkstatus_get(dev, &link);
4505
4506                 ixgbe_dev_link_update(dev, 0);
4507
4508                 /* link was down, likely coming up */
4509                 if (!link.link_status)
4510                         /* handle it 1 sec later, waiting for it to stabilize */
4511                         timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4512                 /* link was up, likely going down */
4513                 else
4514                         /* handle it 4 sec later, waiting for it to stabilize */
4515                         timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4516
4517                 ixgbe_dev_link_status_print(dev);
4518                 if (rte_eal_alarm_set(timeout * 1000,
4519                                       ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4520                         PMD_DRV_LOG(ERR, "Error setting alarm");
4521                 else {
4522                         /* remember original mask */
4523                         intr->mask_original = intr->mask;
4524                         /* only disable lsc interrupt */
4525                         intr->mask &= ~IXGBE_EIMS_LSC;
4526                 }
4527         }
4528
4529         PMD_DRV_LOG(DEBUG, "enable intr immediately");
4530         ixgbe_enable_intr(dev);
4531
4532         return 0;
4533 }
4534
4535 /**
4536  * Interrupt handler registered as an alarm callback for the delayed
4537  * handling of a specific interrupt, waiting for the NIC state to become
4538  * stable. As the ixgbe interrupt state is not stable right after the
4539  * link goes down, it needs to wait 4 seconds to get a stable status.
4540  *
4541  * @param handle
4542  *  Pointer to interrupt handle.
4543  * @param param
4544  *  The address of the parameter (struct rte_eth_dev *) registered before.
4545  *
4546  * @return
4547  *  void
4548  */
4549 static void
4550 ixgbe_dev_interrupt_delayed_handler(void *param)
4551 {
4552         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4553         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4554         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4555         struct ixgbe_interrupt *intr =
4556                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4557         struct ixgbe_hw *hw =
4558                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4559         uint32_t eicr;
4560
4561         ixgbe_disable_intr(hw);
4562
4563         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4564         if (eicr & IXGBE_EICR_MAILBOX)
4565                 ixgbe_pf_mbx_process(dev);
4566
4567         if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4568                 ixgbe_handle_lasi(hw);
4569                 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4570         }
4571
4572         if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4573                 ixgbe_dev_link_update(dev, 0);
4574                 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4575                 ixgbe_dev_link_status_print(dev);
4576                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4577                                               NULL);
4578         }
4579
4580         if (intr->flags & IXGBE_FLAG_MACSEC) {
4581                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4582                                               NULL);
4583                 intr->flags &= ~IXGBE_FLAG_MACSEC;
4584         }
4585
4586         /* restore original mask */
4587         intr->mask = intr->mask_original;
4588         intr->mask_original = 0;
4589
4590         PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4591         ixgbe_enable_intr(dev);
4592         rte_intr_ack(intr_handle);
4593 }
4594
4595 /**
4596  * Interrupt handler triggered by the NIC for handling a
4597  * specific interrupt.
4598  *
4599  * @param handle
4600  *  Pointer to interrupt handle.
4601  * @param param
4602  *  The address of the parameter (struct rte_eth_dev *) registered before.
4603  *
4604  * @return
4605  *  void
4606  */
4607 static void
4608 ixgbe_dev_interrupt_handler(void *param)
4609 {
4610         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4611
4612         ixgbe_dev_interrupt_get_status(dev);
4613         ixgbe_dev_interrupt_action(dev);
4614 }
4615
4616 static int
4617 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4618 {
4619         struct ixgbe_hw *hw;
4620
4621         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4622         return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4623 }
4624
4625 static int
4626 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4627 {
4628         struct ixgbe_hw *hw;
4629
4630         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4631         return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4632 }
4633
4634 static int
4635 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4636 {
4637         struct ixgbe_hw *hw;
4638         uint32_t mflcn_reg;
4639         uint32_t fccfg_reg;
4640         int rx_pause;
4641         int tx_pause;
4642
4643         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4644
4645         fc_conf->pause_time = hw->fc.pause_time;
4646         fc_conf->high_water = hw->fc.high_water[0];
4647         fc_conf->low_water = hw->fc.low_water[0];
4648         fc_conf->send_xon = hw->fc.send_xon;
4649         fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4650
4651         /*
4652          * Return rx_pause status according to actual setting of
4653          * MFLCN register.
4654          */
4655         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4656         if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4657                 rx_pause = 1;
4658         else
4659                 rx_pause = 0;
4660
4661         /*
4662          * Return tx_pause status according to actual setting of
4663          * FCCFG register.
4664          */
4665         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4666         if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4667                 tx_pause = 1;
4668         else
4669                 tx_pause = 0;
4670
4671         if (rx_pause && tx_pause)
4672                 fc_conf->mode = RTE_FC_FULL;
4673         else if (rx_pause)
4674                 fc_conf->mode = RTE_FC_RX_PAUSE;
4675         else if (tx_pause)
4676                 fc_conf->mode = RTE_FC_TX_PAUSE;
4677         else
4678                 fc_conf->mode = RTE_FC_NONE;
4679
4680         return 0;
4681 }
4682
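/*
 * Apply a flow-control configuration. Watermarks are expressed in KB;
 * the ceiling is derived from the Rx packet buffer, leaving room for
 * at least one full Ethernet frame.
 *
 * Illustrative arithmetic (a sketch only; the 512 KB packet buffer is
 * an assumption, the actual RXPBSIZE value depends on the setup):
 *   rx_buf_size    = 0x80000 bytes (512 KB)
 *   max_high_water = (0x80000 - RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT (10)
 *                  = 510 (KB)
 */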
4683 static int
4684 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4685 {
4686         struct ixgbe_hw *hw;
4687         int err;
4688         uint32_t rx_buf_size;
4689         uint32_t max_high_water;
4690         uint32_t mflcn;
4691         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4692                 ixgbe_fc_none,
4693                 ixgbe_fc_rx_pause,
4694                 ixgbe_fc_tx_pause,
4695                 ixgbe_fc_full
4696         };
4697
4698         PMD_INIT_FUNC_TRACE();
4699
4700         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4701         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4702         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4703
4704         /*
4705          * Reserve at least one Ethernet frame for the watermark;
4706          * high_water/low_water are in kilobytes for ixgbe
4707          */
4708         max_high_water = (rx_buf_size -
4709                         RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4710         if ((fc_conf->high_water > max_high_water) ||
4711                 (fc_conf->high_water < fc_conf->low_water)) {
4712                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4713                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4714                 return -EINVAL;
4715         }
4716
4717         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4718         hw->fc.pause_time     = fc_conf->pause_time;
4719         hw->fc.high_water[0]  = fc_conf->high_water;
4720         hw->fc.low_water[0]   = fc_conf->low_water;
4721         hw->fc.send_xon       = fc_conf->send_xon;
4722         hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4723
4724         err = ixgbe_fc_enable(hw);
4725
4726         /* Not negotiated is not an error case */
4727         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4728
4729                 /* check if we want to forward MAC frames - driver doesn't have native
4730                  * capability to do that, so we'll write the registers ourselves */
4731
4732                 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4733
4734                 /* set or clear MFLCN.PMCF bit depending on configuration */
4735                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4736                         mflcn |= IXGBE_MFLCN_PMCF;
4737                 else
4738                         mflcn &= ~IXGBE_MFLCN_PMCF;
4739
4740                 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4741                 IXGBE_WRITE_FLUSH(hw);
4742
4743                 return 0;
4744         }
4745
4746         PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4747         return -EIO;
4748 }
4749
4750 /**
4751  *  ixgbe_dcb_pfc_enable_generic - Enable priority flow control
4752  *  @hw: pointer to hardware structure
4753  *  @tc_num: traffic class number
4754  *  Enable priority flow control according to the current settings.
4755  */
4756 static int
4757 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4758 {
4759         int ret_val = 0;
4760         uint32_t mflcn_reg, fccfg_reg;
4761         uint32_t reg;
4762         uint32_t fcrtl, fcrth;
4763         uint8_t i;
4764         uint8_t nb_rx_en;
4765
4766         /* Validate the water mark configuration */
4767         if (!hw->fc.pause_time) {
4768                 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4769                 goto out;
4770         }
4771
4772         /* Low water mark of zero causes XOFF floods */
4773         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4774                  /* High/Low water cannot be 0 */
4775                 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4776                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4777                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4778                         goto out;
4779                 }
4780
4781                 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4782                         PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4783                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4784                         goto out;
4785                 }
4786         }
4787         /* Negotiate the fc mode to use */
4788         ixgbe_fc_autoneg(hw);
4789
4790         /* Disable any previous flow control settings */
4791         mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4792         mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4793
4794         fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4795         fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4796
4797         switch (hw->fc.current_mode) {
4798         case ixgbe_fc_none:
4799                 /*
4800                  * If more than one Rx priority flow control is enabled,
4801                  * Tx pause cannot be disabled
4802                  */
4803                 nb_rx_en = 0;
4804                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4805                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4806                         if (reg & IXGBE_FCRTH_FCEN)
4807                                 nb_rx_en++;
4808                 }
4809                 if (nb_rx_en > 1)
4810                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4811                 break;
4812         case ixgbe_fc_rx_pause:
4813                 /*
4814                  * Rx Flow control is enabled and Tx Flow control is
4815                  * disabled by software override. Since there really
4816                  * isn't a way to advertise that we are capable of RX
4817                  * Pause ONLY, we will advertise that we support both
4818                  * symmetric and asymmetric Rx PAUSE.  Later, we will
4819                  * disable the adapter's ability to send PAUSE frames.
4820                  */
4821                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4822                 /*
4823                  * If more than one Rx priority flow control is enabled,
4824                  * Tx pause cannot be disabled
4825                  */
4826                 nb_rx_en = 0;
4827                 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4828                         reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4829                         if (reg & IXGBE_FCRTH_FCEN)
4830                                 nb_rx_en++;
4831                 }
4832                 if (nb_rx_en > 1)
4833                         fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4834                 break;
4835         case ixgbe_fc_tx_pause:
4836                 /*
4837                  * Tx Flow control is enabled, and Rx Flow control is
4838                  * disabled by software override.
4839                  */
4840                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4841                 break;
4842         case ixgbe_fc_full:
4843                 /* Flow control (both Rx and Tx) is enabled by SW override. */
4844                 mflcn_reg |= IXGBE_MFLCN_RPFCE;
4845                 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4846                 break;
4847         default:
4848                 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4849                 ret_val = IXGBE_ERR_CONFIG;
4850                 goto out;
4851         }
4852
4853         /* Set 802.3x based flow control settings. */
4854         mflcn_reg |= IXGBE_MFLCN_DPF;
4855         IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4856         IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4857
4858         /* Set up and enable Rx high/low water mark thresholds, enable XON. */
4859         if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4860                 hw->fc.high_water[tc_num]) {
4861                 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4862                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4863                 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4864         } else {
4865                 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4866                 /*
4867                  * In order to prevent Tx hangs when the internal Tx
4868                  * switch is enabled we must set the high water mark
4869                  * to the maximum FCRTH value.  This allows the Tx
4870                  * switch to function even under heavy Rx workloads.
4871                  */
4872                 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4873         }
4874         IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4875
4876         /* Configure pause time (2 TCs per register) */
4877         reg = hw->fc.pause_time * 0x00010001;
4878         for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4879                 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4880
4881         /* Configure flow control refresh threshold value */
4882         IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4883
4884 out:
4885         return ret_val;
4886 }
4887
4888 static int
4889 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4890 {
4891         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4892         int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4893
4894         if (hw->mac.type != ixgbe_mac_82598EB) {
4895                 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4896         }
4897         return ret_val;
4898 }
4899
4900 static int
4901 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4902 {
4903         int err;
4904         uint32_t rx_buf_size;
4905         uint32_t max_high_water;
4906         uint8_t tc_num;
4907         uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4908         struct ixgbe_hw *hw =
4909                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4910         struct ixgbe_dcb_config *dcb_config =
4911                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4912
4913         enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4914                 ixgbe_fc_none,
4915                 ixgbe_fc_rx_pause,
4916                 ixgbe_fc_tx_pause,
4917                 ixgbe_fc_full
4918         };
4919
4920         PMD_INIT_FUNC_TRACE();
4921
4922         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4923         tc_num = map[pfc_conf->priority];
4924         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4925         PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4926         /*
4927          * Reserve at least one Ethernet frame for the watermark;
4928          * high_water/low_water are in kilobytes for ixgbe
4929          */
4930         max_high_water = (rx_buf_size -
4931                         RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4932         if ((pfc_conf->fc.high_water > max_high_water) ||
4933             (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4934                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4935                 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4936                 return -EINVAL;
4937         }
4938
4939         hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4940         hw->fc.pause_time = pfc_conf->fc.pause_time;
4941         hw->fc.send_xon = pfc_conf->fc.send_xon;
4942         hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4943         hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4944
4945         err = ixgbe_dcb_pfc_enable(dev, tc_num);
4946
4947         /* Not negotiated is not an error case */
4948         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4949                 return 0;
4950
4951         PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4952         return -EIO;
4953 }
4954
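/*
 * Update the RSS redirection table. Each 32-bit RETA register packs
 * four 8-bit queue indices, so entries are handled in groups of four
 * (IXGBE_4_BIT_WIDTH) and a partially masked group triggers a
 * read-modify-write of the register.
 *
 * Minimal application-side usage sketch (illustrative only; port_id
 * and nb_queues are hypothetical, and a real caller should take the
 * table size from dev_info.reta_size rather than assume 128 entries):
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[2]; // 2 * 64 = 128 entries
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < 128; i++) {
 *           reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                   1ULL << (i % RTE_RETA_GROUP_SIZE);
 *           reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                   i % nb_queues; // spread entries round-robin
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */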
4955 static int
4956 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4957                           struct rte_eth_rss_reta_entry64 *reta_conf,
4958                           uint16_t reta_size)
4959 {
4960         uint16_t i, sp_reta_size;
4961         uint8_t j, mask;
4962         uint32_t reta, r;
4963         uint16_t idx, shift;
4964         struct ixgbe_adapter *adapter = dev->data->dev_private;
4965         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4966         uint32_t reta_reg;
4967
4968         PMD_INIT_FUNC_TRACE();
4969
4970         if (!ixgbe_rss_update_sp(hw->mac.type)) {
4971                 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4972                         "NIC.");
4973                 return -ENOTSUP;
4974         }
4975
4976         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4977         if (reta_size != sp_reta_size) {
4978                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4979                         "(%d) doesn't match the number hardware can supported "
4980                         "(%d)", reta_size, sp_reta_size);
4981                 return -EINVAL;
4982         }
4983
4984         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4985                 idx = i / RTE_RETA_GROUP_SIZE;
4986                 shift = i % RTE_RETA_GROUP_SIZE;
4987                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4988                                                 IXGBE_4_BIT_MASK);
4989                 if (!mask)
4990                         continue;
4991                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4992                 if (mask == IXGBE_4_BIT_MASK)
4993                         r = 0;
4994                 else
4995                         r = IXGBE_READ_REG(hw, reta_reg);
4996                 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4997                         if (mask & (0x1 << j))
4998                                 reta |= reta_conf[idx].reta[shift + j] <<
4999                                                         (CHAR_BIT * j);
5000                         else
5001                                 reta |= r & (IXGBE_8_BIT_MASK <<
5002                                                 (CHAR_BIT * j));
5003                 }
5004                 IXGBE_WRITE_REG(hw, reta_reg, reta);
5005         }
5006         adapter->rss_reta_updated = 1;
5007
5008         return 0;
5009 }
5010
5011 static int
5012 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
5013                          struct rte_eth_rss_reta_entry64 *reta_conf,
5014                          uint16_t reta_size)
5015 {
5016         uint16_t i, sp_reta_size;
5017         uint8_t j, mask;
5018         uint32_t reta;
5019         uint16_t idx, shift;
5020         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5021         uint32_t reta_reg;
5022
5023         PMD_INIT_FUNC_TRACE();
5024         sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5025         if (reta_size != sp_reta_size) {
5026                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
5027                         "(%d) doesn't match the number hardware can supported "
5028                         "(%d)", reta_size, sp_reta_size);
5029                 return -EINVAL;
5030         }
5031
5032         for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
5033                 idx = i / RTE_RETA_GROUP_SIZE;
5034                 shift = i % RTE_RETA_GROUP_SIZE;
5035                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
5036                                                 IXGBE_4_BIT_MASK);
5037                 if (!mask)
5038                         continue;
5039
5040                 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5041                 reta = IXGBE_READ_REG(hw, reta_reg);
5042                 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
5043                         if (mask & (0x1 << j))
5044                                 reta_conf[idx].reta[shift + j] =
5045                                         ((reta >> (CHAR_BIT * j)) &
5046                                                 IXGBE_8_BIT_MASK);
5047                 }
5048         }
5049
5050         return 0;
5051 }
5052
5053 static int
5054 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
5055                                 uint32_t index, uint32_t pool)
5056 {
5057         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5058         uint32_t enable_addr = 1;
5059
5060         return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
5061                              pool, enable_addr);
5062 }
5063
5064 static void
5065 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
5066 {
5067         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5068
5069         ixgbe_clear_rar(hw, index);
5070 }
5071
5072 static int
5073 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
5074 {
5075         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5076
5077         ixgbe_remove_rar(dev, 0);
5078         ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
5079
5080         return 0;
5081 }
5082
5083 static bool
5084 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
5085 {
5086         if (strcmp(dev->device->driver->name, drv->driver.name))
5087                 return false;
5088
5089         return true;
5090 }
5091
5092 bool
5093 is_ixgbe_supported(struct rte_eth_dev *dev)
5094 {
5095         return is_device_supported(dev, &rte_ixgbe_pmd);
5096 }
5097
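/*
 * Set a new MTU. The resulting frame size (MTU plus L2 overhead) is
 * validated against the device maximum, jumbo mode is toggled through
 * HLREG0.JUMBOEN, and the limit itself is programmed into the upper
 * 16 bits of the MAXFRS register.
 */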
5098 static int
5099 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
5100 {
5101         uint32_t hlreg0;
5102         uint32_t maxfrs;
5103         struct ixgbe_hw *hw;
5104         struct rte_eth_dev_info dev_info;
5105         uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
5106         struct rte_eth_dev_data *dev_data = dev->data;
5107         int ret;
5108
5109         ret = ixgbe_dev_info_get(dev, &dev_info);
5110         if (ret != 0)
5111                 return ret;
5112
5113         /* check that mtu is within the allowed range */
5114         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
5115                 return -EINVAL;
5116
5117         /* If the device is started, refuse an mtu that requires scattered
5118          * packet support when this feature has not been enabled before.
5119          */
5120         if (dev_data->dev_started && !dev_data->scattered_rx &&
5121             (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
5122              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
5123                 PMD_INIT_LOG(ERR, "Stop port first.");
5124                 return -EINVAL;
5125         }
5126
5127         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5128         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5129
5130         /* switch to jumbo mode if needed */
5131         if (frame_size > RTE_ETHER_MAX_LEN) {
5132                 dev->data->dev_conf.rxmode.offloads |=
5133                         DEV_RX_OFFLOAD_JUMBO_FRAME;
5134                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
5135         } else {
5136                 dev->data->dev_conf.rxmode.offloads &=
5137                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
5138                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
5139         }
5140         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5141
5142         /* update max frame size */
5143         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
5144
5145         maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
5146         maxfrs &= 0x0000FFFF;
5147         maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
5148         IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
5149
5150         return 0;
5151 }
5152
5153 /*
5154  * Virtual Function operations
5155  */
5156 static void
5157 ixgbevf_intr_disable(struct rte_eth_dev *dev)
5158 {
5159         struct ixgbe_interrupt *intr =
5160                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5161         struct ixgbe_hw *hw =
5162                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5163
5164         PMD_INIT_FUNC_TRACE();
5165
5166         /* Clear the interrupt mask to stop interrupts from being generated */
5167         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
5168
5169         IXGBE_WRITE_FLUSH(hw);
5170
5171         /* Clear mask value. */
5172         intr->mask = 0;
5173 }
5174
5175 static void
5176 ixgbevf_intr_enable(struct rte_eth_dev *dev)
5177 {
5178         struct ixgbe_interrupt *intr =
5179                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5180         struct ixgbe_hw *hw =
5181                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5182
5183         PMD_INIT_FUNC_TRACE();
5184
5185         /* Enable VF interrupt auto-clearing */
5186         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
5187         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
5188         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
5189
5190         IXGBE_WRITE_FLUSH(hw);
5191
5192         /* Save IXGBE_VTEIMS value to mask. */
5193         intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
5194 }
5195
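/*
 * VF device configuration hook. It mainly sanity-adjusts the requested
 * Rx offloads: RSS implies delivering the computed hash, and CRC
 * stripping is owned by the PF, so the KEEP_CRC request is forced to
 * match whatever the host side dictates.
 */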
5196 static int
5197 ixgbevf_dev_configure(struct rte_eth_dev *dev)
5198 {
5199         struct rte_eth_conf *conf = &dev->data->dev_conf;
5200         struct ixgbe_adapter *adapter = dev->data->dev_private;
5201
5202         PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
5203                      dev->data->port_id);
5204
5205         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
5206                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
5207
5208         /*
5209          * The VF has no ability to enable/disable HW CRC stripping;
5210          * keep the behavior consistent with the host PF
5211          */
5212 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
5213         if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
5214                 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
5215                 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
5216         }
5217 #else
5218         if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
5219                 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
5220                 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
5221         }
5222 #endif
5223
5224         /*
5225          * Initialize to TRUE. If any Rx queue doesn't meet the bulk
5226          * allocation or vector Rx preconditions, we will reset it.
5227          */
5228         adapter->rx_bulk_alloc_allowed = true;
5229         adapter->rx_vec_allowed = true;
5230
5231         return 0;
5232 }
5233
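/*
 * VF start sequence: reset the VF function, negotiate the mailbox API
 * version with the PF, initialize the Tx/Rx rings, restore VLAN
 * filtering and stripping, wire up the single Rx interrupt vector, and
 * finally kick off a link update.
 */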
5234 static int
5235 ixgbevf_dev_start(struct rte_eth_dev *dev)
5236 {
5237         struct ixgbe_hw *hw =
5238                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5239         uint32_t intr_vector = 0;
5240         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5241         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5242
5243         int err, mask = 0;
5244
5245         PMD_INIT_FUNC_TRACE();
5246
5247         /* Stop the link setup handler before resetting the HW. */
5248         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
5249
5250         err = hw->mac.ops.reset_hw(hw);
5251         if (err) {
5252                 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
5253                 return err;
5254         }
5255         hw->mac.get_link_status = true;
5256
5257         /* negotiate mailbox API version to use with the PF. */
5258         ixgbevf_negotiate_api(hw);
5259
5260         ixgbevf_dev_tx_init(dev);
5261
5262         /* This can fail when allocating mbufs for descriptor rings */
5263         err = ixgbevf_dev_rx_init(dev);
5264         if (err) {
5265                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5266                 ixgbe_dev_clear_queues(dev);
5267                 return err;
5268         }
5269
5270         /* Set vfta */
5271         ixgbevf_set_vfta_all(dev, 1);
5272
5273         /* Set HW strip */
5274         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5275                 ETH_VLAN_EXTEND_MASK;
5276         err = ixgbevf_vlan_offload_config(dev, mask);
5277         if (err) {
5278                 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
5279                 ixgbe_dev_clear_queues(dev);
5280                 return err;
5281         }
5282
5283         ixgbevf_dev_rxtx_start(dev);
5284
5285         /* check and configure queue intr-vector mapping */
5286         if (rte_intr_cap_multiple(intr_handle) &&
5287             dev->data->dev_conf.intr_conf.rxq) {
5288                 /* According to the datasheet, only vectors 0/1/2 can be used;
5289                  * for now only one vector is used for the Rx queue
5290                  */
5291                 intr_vector = 1;
5292                 if (rte_intr_efd_enable(intr_handle, intr_vector))
5293                         return -1;
5294         }
5295
5296         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5297                 intr_handle->intr_vec =
5298                         rte_zmalloc("intr_vec",
5299                                     dev->data->nb_rx_queues * sizeof(int), 0);
5300                 if (intr_handle->intr_vec == NULL) {
5301                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5302                                      " intr_vec", dev->data->nb_rx_queues);
5303                         return -ENOMEM;
5304                 }
5305         }
5306         ixgbevf_configure_msix(dev);
5307
5308         /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt
5309          * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
5310          * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init()
5311          * is not cleared, the following rte_intr_enable() will fail when it
5312          * tries to map Rx queue interrupts to other VFIO vectors.
5313          * So clear the uio/vfio intr/eventfd first to avoid failure.
5314          */
5315         rte_intr_disable(intr_handle);
5316
5317         rte_intr_enable(intr_handle);
5318
5319         /* Re-enable interrupt for VF */
5320         ixgbevf_intr_enable(dev);
5321
5322         /*
5323          * Update link status right before return, because it may
5324          * start link configuration process in a separate thread.
5325          */
5326         ixgbevf_dev_link_update(dev, 0);
5327
5328         hw->adapter_stopped = false;
5329
5330         return 0;
5331 }
5332
5333 static void
5334 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5335 {
5336         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5337         struct ixgbe_adapter *adapter = dev->data->dev_private;
5338         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5339         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5340
5341         if (hw->adapter_stopped)
5342                 return;
5343
5344         PMD_INIT_FUNC_TRACE();
5345
5346         rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
5347
5348         ixgbevf_intr_disable(dev);
5349
5350         hw->adapter_stopped = 1;
5351         ixgbe_stop_adapter(hw);
5352
5353         /*
5354          * Clear what we set, but we still keep shadow_vfta to
5355          * restore it after the device starts
5356          */
5357         ixgbevf_set_vfta_all(dev, 0);
5358
5359         /* Clear stored conf */
5360         dev->data->scattered_rx = 0;
5361
5362         ixgbe_dev_clear_queues(dev);
5363
5364         /* Clean datapath event and queue/vec mapping */
5365         rte_intr_efd_disable(intr_handle);
5366         if (intr_handle->intr_vec != NULL) {
5367                 rte_free(intr_handle->intr_vec);
5368                 intr_handle->intr_vec = NULL;
5369         }
5370
5371         adapter->rss_reta_updated = 0;
5372 }
5373
5374 static void
5375 ixgbevf_dev_close(struct rte_eth_dev *dev)
5376 {
5377         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5378         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5379         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5380
5381         PMD_INIT_FUNC_TRACE();
5382
5383         ixgbe_reset_hw(hw);
5384
5385         ixgbevf_dev_stop(dev);
5386
5387         ixgbe_dev_free_queues(dev);
5388
5389         /**
5390          * Remove the VF MAC address to ensure
5391          * that the VF traffic goes to the PF
5392          * after stop, close and detach of the VF
5393          **/
5394         ixgbevf_remove_mac_addr(dev, 0);
5395
5396         dev->dev_ops = NULL;
5397         dev->rx_pkt_burst = NULL;
5398         dev->tx_pkt_burst = NULL;
5399
5400         rte_intr_disable(intr_handle);
5401         rte_intr_callback_unregister(intr_handle,
5402                                      ixgbevf_dev_interrupt_handler, dev);
5403 }
5404
5405 /*
5406  * Reset VF device
5407  */
5408 static int
5409 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5410 {
5411         int ret;
5412
5413         ret = eth_ixgbevf_dev_uninit(dev);
5414         if (ret)
5415                 return ret;
5416
5417         ret = eth_ixgbevf_dev_init(dev);
5418
5419         return ret;
5420 }
5421
5422 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5423 {
5424         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5425         struct ixgbe_vfta *shadow_vfta =
5426                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5427         int i = 0, j = 0, vfta = 0, mask = 1;
5428
5429         for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5430                 vfta = shadow_vfta->vfta[i];
5431                 if (vfta) {
5432                         mask = 1;
5433                         for (j = 0; j < 32; j++) {
5434                                 if (vfta & mask)
5435                                         ixgbe_set_vfta(hw, (i<<5)+j, 0,
5436                                                        on, false);
5437                                 mask <<= 1;
5438                         }
5439                 }
5440         }
5441
5442 }
5443
5444 static int
5445 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5446 {
5447         struct ixgbe_hw *hw =
5448                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5449         struct ixgbe_vfta *shadow_vfta =
5450                 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5451         uint32_t vid_idx = 0;
5452         uint32_t vid_bit = 0;
5453         int ret = 0;
5454
5455         PMD_INIT_FUNC_TRACE();
5456
5457         /* vind is not used in the VF driver; set it to 0 (see ixgbe_set_vfta_vf) */
5458         ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5459         if (ret) {
5460                 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5461                 return ret;
5462         }
5463         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5464         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5465
5466         /* Save what we set and restore it after device reset */
5467         if (on)
5468                 shadow_vfta->vfta[vid_idx] |= vid_bit;
5469         else
5470                 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5471
5472         return 0;
5473 }
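
/*
 * Worked example (illustrative) of the shadow VFTA indexing above:
 * for vlan_id = 100 (0x064),
 *   vid_idx = (100 >> 5) & 0x7F = 3       -> shadow_vfta->vfta[3]
 *   vid_bit = 1 << (100 & 0x1F) = 1 << 4  -> bit 4 of that word
 * so VLAN 100 is tracked in bit 4 of the fourth 32-bit VFTA word.
 */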
5474
5475 static void
5476 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5477 {
5478         struct ixgbe_hw *hw =
5479                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5480         uint32_t ctrl;
5481
5482         PMD_INIT_FUNC_TRACE();
5483
5484         if (queue >= hw->mac.max_rx_queues)
5485                 return;
5486
5487         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5488         if (on)
5489                 ctrl |= IXGBE_RXDCTL_VME;
5490         else
5491                 ctrl &= ~IXGBE_RXDCTL_VME;
5492         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5493
5494         ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5495 }
5496
5497 static int
5498 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
5499 {
5500         struct ixgbe_rx_queue *rxq;
5501         uint16_t i;
5502         int on = 0;
5503
5504         /* The VF only supports the HW VLAN strip feature; other offloads are not supported */
5505         if (mask & ETH_VLAN_STRIP_MASK) {
5506                 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5507                         rxq = dev->data->rx_queues[i];
5508                         on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
5509                         ixgbevf_vlan_strip_queue_set(dev, i, on);
5510                 }
5511         }
5512
5513         return 0;
5514 }
5515
5516 static int
5517 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5518 {
5519         ixgbe_config_vlan_strip_on_all_queues(dev, mask);
5520
5521         ixgbevf_vlan_offload_config(dev, mask);
5522
5523         return 0;
5524 }
5525
5526 int
5527 ixgbe_vt_check(struct ixgbe_hw *hw)
5528 {
5529         uint32_t reg_val;
5530
5531         /* if Virtualization Technology is enabled */
5532         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5533         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5534                 PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5535                 return -1;
5536         }
5537
5538         return 0;
5539 }
5540
5541 static uint32_t
5542 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr)
5543 {
5544         uint32_t vector = 0;
5545
5546         switch (hw->mac.mc_filter_type) {
5547         case 0:   /* use bits [47:36] of the address */
5548                 vector = ((uc_addr->addr_bytes[4] >> 4) |
5549                         (((uint16_t)uc_addr->addr_bytes[5]) << 4));
5550                 break;
5551         case 1:   /* use bits [46:35] of the address */
5552                 vector = ((uc_addr->addr_bytes[4] >> 3) |
5553                         (((uint16_t)uc_addr->addr_bytes[5]) << 5));
5554                 break;
5555         case 2:   /* use bits [45:34] of the address */
5556                 vector = ((uc_addr->addr_bytes[4] >> 2) |
5557                         (((uint16_t)uc_addr->addr_bytes[5]) << 6));
5558                 break;
5559         case 3:   /* use bits [43:32] of the address */
5560                 vector = ((uc_addr->addr_bytes[4]) |
5561                         (((uint16_t)uc_addr->addr_bytes[5]) << 8));
5562                 break;
5563         default:  /* Invalid mc_filter_type */
5564                 break;
5565         }
5566
5567         /* vector can only be 12 bits wide or the boundary will be exceeded */
5568         vector &= 0xFFF;
5569         return vector;
5570 }
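
/*
 * Worked example (illustrative) for mc_filter_type 0, i.e. bits [47:36]:
 * for uc_addr 00:11:22:33:44:55, addr_bytes[4] = 0x44, addr_bytes[5] = 0x55,
 *   vector = (0x44 >> 4) | (0x55 << 4) = 0x004 | 0x550 = 0x554
 * and the 12-bit result selects one of the 4096 hash buckets.
 */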
5571
5572 static int
5573 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
5574                         struct rte_ether_addr *mac_addr, uint8_t on)
5575 {
5576         uint32_t vector;
5577         uint32_t uta_idx;
5578         uint32_t reg_val;
5579         uint32_t uta_shift;
5580         uint32_t rc;
5581         const uint32_t ixgbe_uta_idx_mask = 0x7F;
5582         const uint32_t ixgbe_uta_bit_shift = 5;
5583         const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5584         const uint32_t bit1 = 0x1;
5585
5586         struct ixgbe_hw *hw =
5587                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5588         struct ixgbe_uta_info *uta_info =
5589                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5590
5591         /* The UTA table only exists on 82599 hardware and newer */
5592         if (hw->mac.type < ixgbe_mac_82599EB)
5593                 return -ENOTSUP;
5594
5595         vector = ixgbe_uta_vector(hw, mac_addr);
5596         uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5597         uta_shift = vector & ixgbe_uta_bit_mask;
5598
5599         rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5600         if (rc == on)
5601                 return 0;
5602
5603         reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5604         if (on) {
5605                 uta_info->uta_in_use++;
5606                 reg_val |= (bit1 << uta_shift);
5607                 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5608         } else {
5609                 uta_info->uta_in_use--;
5610                 reg_val &= ~(bit1 << uta_shift);
5611                 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5612         }
5613
5614         IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5615
5616         if (uta_info->uta_in_use > 0)
5617                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5618                                 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5619         else
5620                 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5621
5622         return 0;
5623 }
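
/*
 * Continuing the example above (illustrative), for vector = 0x554:
 *   uta_idx   = (0x554 >> 5) & 0x7F = 42 (0x2A) -> IXGBE_UTA(42)
 *   uta_shift =  0x554 & 0x1F       = 20 (0x14) -> bit 20 of that register
 * so each of the 128 UTA registers covers 32 of the 4096 hash buckets.
 */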
5624
5625 static int
5626 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5627 {
5628         int i;
5629         struct ixgbe_hw *hw =
5630                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5631         struct ixgbe_uta_info *uta_info =
5632                 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5633
5634         /* The UTA table only exists on 82599 hardware and newer */
5635         if (hw->mac.type < ixgbe_mac_82599EB)
5636                 return -ENOTSUP;
5637
5638         if (on) {
5639                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5640                         uta_info->uta_shadow[i] = ~0;
5641                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5642                 }
5643         } else {
5644                 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5645                         uta_info->uta_shadow[i] = 0;
5646                         IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5647                 }
5648         }
5649         return 0;
5650
5651 }
5652
5653 uint32_t
5654 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5655 {
5656         uint32_t new_val = orig_val;
5657
5658         if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5659                 new_val |= IXGBE_VMOLR_AUPE;
5660         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5661                 new_val |= IXGBE_VMOLR_ROMPE;
5662         if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5663                 new_val |= IXGBE_VMOLR_ROPE;
5664         if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5665                 new_val |= IXGBE_VMOLR_BAM;
5666         if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5667                 new_val |= IXGBE_VMOLR_MPE;
5668
5669         return new_val;
5670 }
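
/*
 * Example translation (illustrative): to accept untagged and broadcast
 * frames on a pool,
 *   rx_mask = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST
 * adds IXGBE_VMOLR_AUPE | IXGBE_VMOLR_BAM to orig_val, leaving any bits
 * already set in orig_val untouched.
 */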
5671
5672 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5673 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5674 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5675 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5676 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5677         ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5678         ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5679
5680 static int
5681 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5682                       struct rte_eth_mirror_conf *mirror_conf,
5683                       uint8_t rule_id, uint8_t on)
5684 {
5685         uint32_t mr_ctl, vlvf;
5686         uint32_t mp_lsb = 0;
5687         uint32_t mv_msb = 0;
5688         uint32_t mv_lsb = 0;
5689         uint32_t mp_msb = 0;
5690         uint8_t i = 0;
5691         int reg_index = 0;
5692         uint64_t vlan_mask = 0;
5693
5694         const uint8_t pool_mask_offset = 32;
5695         const uint8_t vlan_mask_offset = 32;
5696         const uint8_t dst_pool_offset = 8;
5697         const uint8_t rule_mr_offset  = 4;
5698         const uint8_t mirror_rule_mask = 0x0F;
5699
5700         struct ixgbe_mirror_info *mr_info =
5701                         (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5702         struct ixgbe_hw *hw =
5703                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5704         uint8_t mirror_type = 0;
5705
5706         if (ixgbe_vt_check(hw) < 0)
5707                 return -ENOTSUP;
5708
5709         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5710                 return -EINVAL;
5711
5712         if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5713                 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5714                             mirror_conf->rule_type);
5715                 return -EINVAL;
5716         }
5717
5718         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5719                 mirror_type |= IXGBE_MRCTL_VLME;
5720                 /* Check if the vlan id is valid and find the corresponding VLAN ID
5721                  * index in VLVF
5722                  */
5723                 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5724                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5725                                 /* search for the pool vlan filter index
5726                                  * related to this vlan id
5727                                  */
5728                                 reg_index = ixgbe_find_vlvf_slot(
5729                                                 hw,
5730                                                 mirror_conf->vlan.vlan_id[i],
5731                                                 false);
5732                                 if (reg_index < 0)
5733                                         return -EINVAL;
5734                                 vlvf = IXGBE_READ_REG(hw,
5735                                                       IXGBE_VLVF(reg_index));
5736                                 if ((vlvf & IXGBE_VLVF_VIEN) &&
5737                                     ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5738                                       mirror_conf->vlan.vlan_id[i]))
5739                                         vlan_mask |= (1ULL << reg_index);
5740                                 else
5741                                         return -EINVAL;
5742                         }
5743                 }
5744
5745                 if (on) {
5746                         mv_lsb = vlan_mask & 0xFFFFFFFF;
5747                         mv_msb = vlan_mask >> vlan_mask_offset;
5748
5749                         mr_info->mr_conf[rule_id].vlan.vlan_mask =
5750                                                 mirror_conf->vlan.vlan_mask;
5751                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5752                                 if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5753                                         mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5754                                                 mirror_conf->vlan.vlan_id[i];
5755                         }
5756                 } else {
5757                         mv_lsb = 0;
5758                         mv_msb = 0;
5759                         mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5760                         for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5761                                 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5762                 }
5763         }
5764
5765         /**
5766          * If pool mirroring is enabled, write the related pool mask registers;
5767          * if it is disabled, clear the PFMRVM registers
5768          */
5769         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5770                 mirror_type |= IXGBE_MRCTL_VPME;
5771                 if (on) {
5772                         mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5773                         mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5774                         mr_info->mr_conf[rule_id].pool_mask =
5775                                         mirror_conf->pool_mask;
5776
5777                 } else {
5778                         mp_lsb = 0;
5779                         mp_msb = 0;
5780                         mr_info->mr_conf[rule_id].pool_mask = 0;
5781                 }
5782         }
5783         if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5784                 mirror_type |= IXGBE_MRCTL_UPME;
5785         if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5786                 mirror_type |= IXGBE_MRCTL_DPME;
5787
5788         /* read the mirror control register and recalculate it */
5789         mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5790
5791         if (on) {
5792                 mr_ctl |= mirror_type;
5793                 mr_ctl &= mirror_rule_mask;
5794                 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5795         } else {
5796                 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5797         }
5798
5799         mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5800         mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5801
5802         /* write the mirror control register */
5803         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5804
5805         /* write the pool mirror control registers */
5806         if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5807                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5808                 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5809                                 mp_msb);
5810         }
5811         /* write the VLAN mirror control registers */
5812         if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5813                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5814                 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5815                                 mv_msb);
5816         }
5817
5818         return 0;
5819 }
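
/*
 * Minimal usage sketch (illustrative, not part of the driver): setting
 * up the rule above through the generic ethdev mirroring API. The port
 * id, pool mask and destination pool are hypothetical.
 */
static __rte_unused int
example_mirror_pools(uint16_t port_id)
{
	struct rte_eth_mirror_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
	conf.pool_mask = 0x3;	/* mirror traffic of pools 0 and 1 */
	conf.dst_pool = 3;	/* into pool 3 */
	/* rule 0, enabled; dispatched to ixgbe_mirror_rule_set() above */
	return rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
}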
5820
5821 static int
5822 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5823 {
5824         int mr_ctl = 0;
5825         uint32_t lsb_val = 0;
5826         uint32_t msb_val = 0;
5827         const uint8_t rule_mr_offset = 4;
5828
5829         struct ixgbe_hw *hw =
5830                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5831         struct ixgbe_mirror_info *mr_info =
5832                 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5833
5834         if (ixgbe_vt_check(hw) < 0)
5835                 return -ENOTSUP;
5836
5837         if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5838                 return -EINVAL;
5839
5840         memset(&mr_info->mr_conf[rule_id], 0,
5841                sizeof(struct rte_eth_mirror_conf));
5842
5843         /* clear PFVMCTL register */
5844         IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5845
5846         /* clear pool mask register */
5847         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5848         IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5849
5850         /* clear vlan mask register */
5851         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5852         IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5853
5854         return 0;
5855 }
5856
5857 static int
5858 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5859 {
5860         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5861         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5862         struct ixgbe_interrupt *intr =
5863                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5864         struct ixgbe_hw *hw =
5865                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5866         uint32_t vec = IXGBE_MISC_VEC_ID;
5867
5868         if (rte_intr_allow_others(intr_handle))
5869                 vec = IXGBE_RX_VEC_START;
5870         intr->mask |= (1 << vec);
5871         RTE_SET_USED(queue_id);
5872         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5873
5874         rte_intr_ack(intr_handle);
5875
5876         return 0;
5877 }
5878
5879 static int
5880 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5881 {
5882         struct ixgbe_interrupt *intr =
5883                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5884         struct ixgbe_hw *hw =
5885                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5886         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5887         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5888         uint32_t vec = IXGBE_MISC_VEC_ID;
5889
5890         if (rte_intr_allow_others(intr_handle))
5891                 vec = IXGBE_RX_VEC_START;
5892         intr->mask &= ~(1 << vec);
5893         RTE_SET_USED(queue_id);
5894         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5895
5896         return 0;
5897 }
5898
5899 static int
5900 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5901 {
5902         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5903         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5904         uint32_t mask;
5905         struct ixgbe_hw *hw =
5906                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5907         struct ixgbe_interrupt *intr =
5908                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5909
5910         if (queue_id < 16) {
5911                 ixgbe_disable_intr(hw);
5912                 intr->mask |= (1 << queue_id);
5913                 ixgbe_enable_intr(dev);
5914         } else if (queue_id < 32) {
5915                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5916                 mask &= (1 << queue_id);
5917                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5918         } else if (queue_id < 64) {
5919                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5920                 mask &= (1 << (queue_id - 32));
5921                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5922         }
5923         rte_intr_ack(intr_handle);
5924
5925         return 0;
5926 }
5927
5928 static int
5929 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5930 {
5931         uint32_t mask;
5932         struct ixgbe_hw *hw =
5933                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5934         struct ixgbe_interrupt *intr =
5935                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5936
5937         if (queue_id < 16) {
5938                 ixgbe_disable_intr(hw);
5939                 intr->mask &= ~(1 << queue_id);
5940                 ixgbe_enable_intr(dev);
5941         } else if (queue_id < 32) {
5942                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5943                 mask &= ~(1 << queue_id);
5944                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5945         } else if (queue_id < 64) {
5946                 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5947                 mask &= ~(1 << (queue_id - 32));
5948                 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5949         }
5950
5951         return 0;
5952 }
5953
5954 static void
5955 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5956                      uint8_t queue, uint8_t msix_vector)
5957 {
5958         uint32_t tmp, idx;
5959
5960         if (direction == -1) {
5961                 /* other causes */
5962                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5963                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5964                 tmp &= ~0xFF;
5965                 tmp |= msix_vector;
5966                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5967         } else {
5968                 /* rx or tx cause */
5969                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5970                 idx = ((16 * (queue & 1)) + (8 * direction));
5971                 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5972                 tmp &= ~(0xFF << idx);
5973                 tmp |= (msix_vector << idx);
5974                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5975         }
5976 }
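
/*
 * VTIVAR layout implied by the index math above (illustrative): each
 * 32-bit VTIVAR register holds four 8-bit entries for one queue pair,
 *   Rx even queue -> bits [7:0],   Tx even queue -> bits [15:8],
 *   Rx odd queue  -> bits [23:16], Tx odd queue  -> bits [31:24].
 * E.g. queue 3, Tx (direction 1): idx = (16 * (3 & 1)) + (8 * 1) = 24,
 * so the vector is written to bits [31:24] of VTIVAR(1).
 */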
5977
5978 /**
5979  * set the IVAR registers, mapping interrupt causes to vectors
5980  * @param hw
5981  *  pointer to ixgbe_hw struct
5982  * @param direction
5983  *  0 for Rx, 1 for Tx, -1 for other causes
5984  * @param queue
5985  *  queue to map the corresponding interrupt to
5986  * @param msix_vector
5987  *  the vector to map to the corresponding queue
5988  */
5989 static void
5990 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5991                    uint8_t queue, uint8_t msix_vector)
5992 {
5993         uint32_t tmp, idx;
5994
5995         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5996         if (hw->mac.type == ixgbe_mac_82598EB) {
5997                 if (direction == -1)
5998                         direction = 0;
5999                 idx = (((direction * 64) + queue) >> 2) & 0x1F;
6000                 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
6001                 tmp &= ~(0xFF << (8 * (queue & 0x3)));
6002                 tmp |= (msix_vector << (8 * (queue & 0x3)));
6003                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
6004         } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
6005                         (hw->mac.type == ixgbe_mac_X540) ||
6006                         (hw->mac.type == ixgbe_mac_X550) ||
6007                         (hw->mac.type == ixgbe_mac_X550EM_x)) {
6008                 if (direction == -1) {
6009                         /* other causes */
6010                         idx = ((queue & 1) * 8);
6011                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
6012                         tmp &= ~(0xFF << idx);
6013                         tmp |= (msix_vector << idx);
6014                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
6015                 } else {
6016                         /* rx or tx causes */
6017                         idx = ((16 * (queue & 1)) + (8 * direction));
6018                         tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
6019                         tmp &= ~(0xFF << idx);
6020                         tmp |= (msix_vector << idx);
6021                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
6022                 }
6023         }
6024 }
6025
6026 static void
6027 ixgbevf_configure_msix(struct rte_eth_dev *dev)
6028 {
6029         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6030         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
6031         struct ixgbe_hw *hw =
6032                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6033         uint32_t q_idx;
6034         uint32_t vector_idx = IXGBE_MISC_VEC_ID;
6035         uint32_t base = IXGBE_MISC_VEC_ID;
6036
6037         /* Configure VF other cause ivar */
6038         ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
6039
6040         /* Won't configure the MSI-X register if no mapping is done
6041          * between intr vector and event fd.
6042          */
6043         if (!rte_intr_dp_is_en(intr_handle))
6044                 return;
6045
6046         if (rte_intr_allow_others(intr_handle)) {
6047                 base = IXGBE_RX_VEC_START;
6048                 vector_idx = IXGBE_RX_VEC_START;
6049         }
6050
6051         /* Configure all RX queues of VF */
6052         for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
6053                 /* Force all queues to use vector 0,
6054                  * as IXGBE_VF_MAXMSIVECTOR = 1
6055                  */
6056                 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
6057                 intr_handle->intr_vec[q_idx] = vector_idx;
6058                 if (vector_idx < base + intr_handle->nb_efd - 1)
6059                         vector_idx++;
6060         }
6061
6062         /* As the RX queue setup above shows, all queues use vector 0.
6063          * Set only the ITR value of IXGBE_MISC_VEC_ID.
6064          */
6065         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
6066                         IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6067                         | IXGBE_EITR_CNT_WDIS);
6068 }
6069
6070 /**
6071  * Sets up the hardware to properly generate MSI-X interrupts
6072  * @param dev
6073  *  pointer to the rte_eth_dev structure
6074  */
6075 static void
6076 ixgbe_configure_msix(struct rte_eth_dev *dev)
6077 {
6078         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
6079         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
6080         struct ixgbe_hw *hw =
6081                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6082         uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
6083         uint32_t vec = IXGBE_MISC_VEC_ID;
6084         uint32_t mask;
6085         uint32_t gpie;
6086
6087         /* Won't configure the MSI-X register if no mapping is done
6088          * between intr vector and event fd,
6089          * but if MSI-X has been enabled already, we still need to configure
6090          * auto clear, auto mask and throttling.
6091          */
6092         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6093         if (!rte_intr_dp_is_en(intr_handle) &&
6094             !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
6095                 return;
6096
6097         if (rte_intr_allow_others(intr_handle))
6098                 vec = base = IXGBE_RX_VEC_START;
6099
6100         /* set up GPIE for MSI-X mode */
6101         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6102         gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
6103                 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
6104         /* auto clearing and auto setting corresponding bits in EIMS
6105          * when MSI-X interrupt is triggered
6106          */
6107         if (hw->mac.type == ixgbe_mac_82598EB) {
6108                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
6109         } else {
6110                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
6111                 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
6112         }
6113         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
6114
6115         /* Populate the IVAR table and set the ITR values to the
6116          * corresponding register.
6117          */
6118         if (rte_intr_dp_is_en(intr_handle)) {
6119                 for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
6120                         queue_id++) {
6121                         /* by default, 1:1 mapping */
6122                         ixgbe_set_ivar_map(hw, 0, queue_id, vec);
6123                         intr_handle->intr_vec[queue_id] = vec;
6124                         if (vec < base + intr_handle->nb_efd - 1)
6125                                 vec++;
6126                 }
6127
6128                 switch (hw->mac.type) {
6129                 case ixgbe_mac_82598EB:
6130                         ixgbe_set_ivar_map(hw, -1,
6131                                            IXGBE_IVAR_OTHER_CAUSES_INDEX,
6132                                            IXGBE_MISC_VEC_ID);
6133                         break;
6134                 case ixgbe_mac_82599EB:
6135                 case ixgbe_mac_X540:
6136                 case ixgbe_mac_X550:
6137                 case ixgbe_mac_X550EM_x:
6138                         ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
6139                         break;
6140                 default:
6141                         break;
6142                 }
6143         }
6144         IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
6145                         IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6146                         | IXGBE_EITR_CNT_WDIS);
6147
6148         /* set up EIAC to auto-clear the timer and the vectors */
6149         mask = IXGBE_EIMS_ENABLE_MASK;
6150         mask &= ~(IXGBE_EIMS_OTHER |
6151                   IXGBE_EIMS_MAILBOX |
6152                   IXGBE_EIMS_LSC);
6153
6154         IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
6155 }
6156
6157 int
6158 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
6159                            uint16_t queue_idx, uint16_t tx_rate)
6160 {
6161         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6162         struct rte_eth_rxmode *rxmode;
6163         uint32_t rf_dec, rf_int;
6164         uint32_t bcnrc_val;
6165         uint16_t link_speed = dev->data->dev_link.link_speed;
6166
6167         if (queue_idx >= hw->mac.max_tx_queues)
6168                 return -EINVAL;
6169
6170         if (tx_rate != 0) {
6171                 /* Calculate the rate factor values to set */
6172                 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
6173                 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
6174                 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
6175
6176                 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
6177                 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
6178                                 IXGBE_RTTBCNRC_RF_INT_MASK_M);
6179                 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
6180         } else {
6181                 bcnrc_val = 0;
6182         }
6183
6184         rxmode = &dev->data->dev_conf.rxmode;
6185         /*
6186          * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
6187          * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
6188          * set it to 0x4.
6189          */
6190         if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
6191             (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
6192                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6193                         IXGBE_MMW_SIZE_JUMBO_FRAME);
6194         else
6195                 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6196                         IXGBE_MMW_SIZE_DEFAULT);
6197
6198         /* Set RTTBCNRC of queue X */
6199         IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
6200         IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
6201         IXGBE_WRITE_FLUSH(hw);
6202
6203         return 0;
6204 }
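
/*
 * Worked rate-factor example (illustrative): with link_speed = 10000
 * Mbps, tx_rate = 3000 Mbps and IXGBE_RTTBCNRC_RF_INT_SHIFT = 14,
 *   rf_int = 10000 / 3000 = 3
 *   rf_dec = ((10000 % 3000) << 14) / 3000 = (1000 << 14) / 3000 = 5461
 * so the 10.14 fixed-point divisor is 3 + 5461/16384 ~= 3.333 and the
 * queue is limited to about 10000 / 3.333 ~= 3000 Mbps.
 */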
6205
6206 static int
6207 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
6208                      __attribute__((unused)) uint32_t index,
6209                      __attribute__((unused)) uint32_t pool)
6210 {
6211         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6212         int diag;
6213
6214         /*
6215          * On an 82599 VF, adding the same MAC addr again is not an idempotent
6216          * operation. Trap this case to avoid exhausting the [very limited]
6217          * set of PF resources used to store VF MAC addresses.
6218          */
6219         if (memcmp(hw->mac.perm_addr, mac_addr,
6220                         sizeof(struct rte_ether_addr)) == 0)
6221                 return -1;
6222         diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6223         if (diag != 0)
6224                 PMD_DRV_LOG(ERR, "Unable to add MAC address "
6225                             "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
6226                             mac_addr->addr_bytes[0],
6227                             mac_addr->addr_bytes[1],
6228                             mac_addr->addr_bytes[2],
6229                             mac_addr->addr_bytes[3],
6230                             mac_addr->addr_bytes[4],
6231                             mac_addr->addr_bytes[5],
6232                             diag);
6233         return diag;
6234 }
6235
6236 static void
6237 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
6238 {
6239         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6240         struct rte_ether_addr *perm_addr =
6241                 (struct rte_ether_addr *)hw->mac.perm_addr;
6242         struct rte_ether_addr *mac_addr;
6243         uint32_t i;
6244         int diag;
6245
6246         /*
6247          * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
6248          * not support the deletion of a given MAC address.
6249          * Instead, it requires deleting all MAC addresses, then adding back
6250          * all MAC addresses with the exception of the one to be deleted.
6251          */
6252         (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
6253
6254         /*
6255          * Add again all MAC addresses, with the exception of the deleted one
6256          * and of the permanent MAC address.
6257          */
6258         for (i = 0, mac_addr = dev->data->mac_addrs;
6259              i < hw->mac.num_rar_entries; i++, mac_addr++) {
6260                 /* Skip the deleted MAC address */
6261                 if (i == index)
6262                         continue;
6263                 /* Skip NULL MAC addresses */
6264                 if (rte_is_zero_ether_addr(mac_addr))
6265                         continue;
6266                 /* Skip the permanent MAC address */
6267                 if (memcmp(perm_addr, mac_addr,
6268                                 sizeof(struct rte_ether_addr)) == 0)
6269                         continue;
6270                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6271                 if (diag != 0)
6272                         PMD_DRV_LOG(ERR,
6273                                     "Adding again MAC address "
6274                                     "%02x:%02x:%02x:%02x:%02x:%02x failed "
6275                                     "diag=%d",
6276                                     mac_addr->addr_bytes[0],
6277                                     mac_addr->addr_bytes[1],
6278                                     mac_addr->addr_bytes[2],
6279                                     mac_addr->addr_bytes[3],
6280                                     mac_addr->addr_bytes[4],
6281                                     mac_addr->addr_bytes[5],
6282                                     diag);
6283         }
6284 }
6285
6286 static int
6287 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
6288                         struct rte_ether_addr *addr)
6289 {
6290         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6291
6292         hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
6293
6294         return 0;
6295 }
6296
6297 int
6298 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
6299                         struct rte_eth_syn_filter *filter,
6300                         bool add)
6301 {
6302         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6303         struct ixgbe_filter_info *filter_info =
6304                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6305         uint32_t syn_info;
6306         uint32_t synqf;
6307
6308         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6309                 return -EINVAL;
6310
6311         syn_info = filter_info->syn_info;
6312
6313         if (add) {
6314                 if (syn_info & IXGBE_SYN_FILTER_ENABLE)
6315                         return -EINVAL;
6316                 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
6317                         IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6318
6319                 if (filter->hig_pri)
6320                         synqf |= IXGBE_SYN_FILTER_SYNQFP;
6321                 else
6322                         synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6323         } else {
6324                 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6325                 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6326                         return -ENOENT;
6327                 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6328         }
6329
6330         filter_info->syn_info = synqf;
6331         IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6332         IXGBE_WRITE_FLUSH(hw);
6333         return 0;
6334 }
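
/*
 * Minimal usage sketch (illustrative, not part of the driver): adding a
 * SYN filter through the legacy filter_ctrl API, which ends up in
 * ixgbe_syn_filter_set() above. The port id and queue are hypothetical.
 */
static __rte_unused int
example_add_syn_filter(uint16_t port_id)
{
	struct rte_eth_syn_filter filter = {
		.hig_pri = 1,	/* SYN filter takes priority */
		.queue = 5,	/* steer TCP SYN packets to queue 5 */
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
				       RTE_ETH_FILTER_ADD, &filter);
}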
6335
6336 static int
6337 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
6338                         struct rte_eth_syn_filter *filter)
6339 {
6340         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6341         uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6342
6343         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
6344                 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
6345                 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
6346                 return 0;
6347         }
6348         return -ENOENT;
6349 }
6350
6351 static int
6352 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
6353                         enum rte_filter_op filter_op,
6354                         void *arg)
6355 {
6356         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6357         int ret;
6358
6359         MAC_TYPE_FILTER_SUP(hw->mac.type);
6360
6361         if (filter_op == RTE_ETH_FILTER_NOP)
6362                 return 0;
6363
6364         if (arg == NULL) {
6365                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
6366                             filter_op);
6367                 return -EINVAL;
6368         }
6369
6370         switch (filter_op) {
6371         case RTE_ETH_FILTER_ADD:
6372                 ret = ixgbe_syn_filter_set(dev,
6373                                 (struct rte_eth_syn_filter *)arg,
6374                                 TRUE);
6375                 break;
6376         case RTE_ETH_FILTER_DELETE:
6377                 ret = ixgbe_syn_filter_set(dev,
6378                                 (struct rte_eth_syn_filter *)arg,
6379                                 FALSE);
6380                 break;
6381         case RTE_ETH_FILTER_GET:
6382                 ret = ixgbe_syn_filter_get(dev,
6383                                 (struct rte_eth_syn_filter *)arg);
6384                 break;
6385         default:
6386                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
6387                 ret = -EINVAL;
6388                 break;
6389         }
6390
6391         return ret;
6392 }
6393
6394
6395 static inline enum ixgbe_5tuple_protocol
6396 convert_protocol_type(uint8_t protocol_value)
6397 {
6398         if (protocol_value == IPPROTO_TCP)
6399                 return IXGBE_FILTER_PROTOCOL_TCP;
6400         else if (protocol_value == IPPROTO_UDP)
6401                 return IXGBE_FILTER_PROTOCOL_UDP;
6402         else if (protocol_value == IPPROTO_SCTP)
6403                 return IXGBE_FILTER_PROTOCOL_SCTP;
6404         else
6405                 return IXGBE_FILTER_PROTOCOL_NONE;
6406 }
6407
6408 /* inject a 5-tuple filter into the HW */
6409 static inline void
6410 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6411                            struct ixgbe_5tuple_filter *filter)
6412 {
6413         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6414         int i;
6415         uint32_t ftqf, sdpqf;
6416         uint32_t l34timir = 0;
6417         uint8_t mask = 0xff;
6418
6419         i = filter->index;
6420
6421         sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6422                                 IXGBE_SDPQF_DSTPORT_SHIFT);
6423         sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6424
6425         ftqf = (uint32_t)(filter->filter_info.proto &
6426                 IXGBE_FTQF_PROTOCOL_MASK);
6427         ftqf |= (uint32_t)((filter->filter_info.priority &
6428                 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
6429         if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6430                 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6431         if (filter->filter_info.dst_ip_mask == 0)
6432                 mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6433         if (filter->filter_info.src_port_mask == 0)
6434                 mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6435         if (filter->filter_info.dst_port_mask == 0)
6436                 mask &= IXGBE_FTQF_DEST_PORT_MASK;
6437         if (filter->filter_info.proto_mask == 0)
6438                 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6439         ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6440         ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6441         ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6442
6443         IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6444         IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6445         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6446         IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6447
6448         l34timir |= IXGBE_L34T_IMIR_RESERVE;
6449         l34timir |= (uint32_t)(filter->queue <<
6450                                 IXGBE_L34T_IMIR_QUEUE_SHIFT);
6451         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6452 }
6453
6454 /*
6455  * add a 5tuple filter
6456  *
6457  * @param
6458  * dev: Pointer to struct rte_eth_dev.
6459  * index: the index allocated to the filter.
6460  * filter: pointer to the filter that will be added.
6461  * rx_queue: the queue id the filter is assigned to.
6462  *
6463  * @return
6464  *    - On success, zero.
6465  *    - On failure, a negative value.
6466  */
6467 static int
6468 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6469                         struct ixgbe_5tuple_filter *filter)
6470 {
6471         struct ixgbe_filter_info *filter_info =
6472                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6473         int i, idx, shift;
6474
6475         /*
6476          * look for an unused 5tuple filter index,
6477          * and insert the filter into the list.
6478          */
6479         for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6480                 idx = i / (sizeof(uint32_t) * NBBY);
6481                 shift = i % (sizeof(uint32_t) * NBBY);
6482                 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6483                         filter_info->fivetuple_mask[idx] |= 1 << shift;
6484                         filter->index = i;
6485                         TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6486                                           filter,
6487                                           entries);
6488                         break;
6489                 }
6490         }
6491         if (i >= IXGBE_MAX_FTQF_FILTERS) {
6492                 PMD_DRV_LOG(ERR, "5tuple filters are full.");
6493                 return -ENOSYS;
6494         }
6495
6496         ixgbe_inject_5tuple_filter(dev, filter);
6497
6498         return 0;
6499 }
6500
6501 /*
6502  * remove a 5tuple filter
6503  *
6504  * @param
6505  * dev: Pointer to struct rte_eth_dev.
6506  * filter: pointer to the filter to be removed.
6507  */
6508 static void
6509 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6510                         struct ixgbe_5tuple_filter *filter)
6511 {
6512         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6513         struct ixgbe_filter_info *filter_info =
6514                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6515         uint16_t index = filter->index;
6516
6517         filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6518                                 ~(1 << (index % (sizeof(uint32_t) * NBBY)));
6519         TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6520         rte_free(filter);
6521
6522         IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6523         IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6524         IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6525         IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6526         IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6527 }
6528
6529 static int
6530 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6531 {
6532         struct ixgbe_hw *hw;
6533         uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
6534         struct rte_eth_dev_data *dev_data = dev->data;
6535
6536         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6537
6538         if (mtu < RTE_ETHER_MIN_MTU ||
6539                         max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
6540                 return -EINVAL;
6541
6542         /* If the device is started, refuse an mtu that requires scattered
6543          * packet support when this feature has not been enabled before.
6544          */
6545         if (dev_data->dev_started && !dev_data->scattered_rx &&
6546             (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6547              dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
6548                 PMD_INIT_LOG(ERR, "Stop port first.");
6549                 return -EINVAL;
6550         }
6551
6552         /*
6553          * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6554          * request of version 2.0 of the mailbox API.
6555          * For now, use the IXGBE_VF_SET_LPE request of version 1.0
6556          * of the mailbox API.
6557          * This IXGBE_VF_SET_LPE request won't work with ixgbe PF drivers
6558          * prior to 3.11.33, which contains the following change:
6559          * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6560          */
6561         ixgbevf_rlpml_set_vf(hw, max_frame);
6562
6563         /* update max frame size */
6564         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6565         return 0;
6566 }
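
/*
 * Minimal usage sketch (illustrative, not part of the driver): raising
 * the VF MTU from an application. The port id and MTU value are
 * hypothetical; whether the request takes effect depends on the PF
 * driver version noted in the comment above.
 */
static __rte_unused int
example_set_vf_mtu(uint16_t port_id)
{
	/* resolves to ixgbevf_dev_set_mtu() for an ixgbevf port */
	return rte_eth_dev_set_mtu(port_id, 9000);
}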
6567
6568 static inline struct ixgbe_5tuple_filter *
6569 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6570                         struct ixgbe_5tuple_filter_info *key)
6571 {
6572         struct ixgbe_5tuple_filter *it;
6573
6574         TAILQ_FOREACH(it, filter_list, entries) {
6575                 if (memcmp(key, &it->filter_info,
6576                         sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6577                         return it;
6578                 }
6579         }
6580         return NULL;
6581 }
6582
6583 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6584 static inline int
6585 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6586                         struct ixgbe_5tuple_filter_info *filter_info)
6587 {
6588         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6589                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6590                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
6591                 return -EINVAL;
6592
6593         switch (filter->dst_ip_mask) {
6594         case UINT32_MAX:
6595                 filter_info->dst_ip_mask = 0;
6596                 filter_info->dst_ip = filter->dst_ip;
6597                 break;
6598         case 0:
6599                 filter_info->dst_ip_mask = 1;
6600                 break;
6601         default:
6602                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6603                 return -EINVAL;
6604         }
6605
6606         switch (filter->src_ip_mask) {
6607         case UINT32_MAX:
6608                 filter_info->src_ip_mask = 0;
6609                 filter_info->src_ip = filter->src_ip;
6610                 break;
6611         case 0:
6612                 filter_info->src_ip_mask = 1;
6613                 break;
6614         default:
6615                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6616                 return -EINVAL;
6617         }
6618
6619         switch (filter->dst_port_mask) {
6620         case UINT16_MAX:
6621                 filter_info->dst_port_mask = 0;
6622                 filter_info->dst_port = filter->dst_port;
6623                 break;
6624         case 0:
6625                 filter_info->dst_port_mask = 1;
6626                 break;
6627         default:
6628                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6629                 return -EINVAL;
6630         }
6631
6632         switch (filter->src_port_mask) {
6633         case UINT16_MAX:
6634                 filter_info->src_port_mask = 0;
6635                 filter_info->src_port = filter->src_port;
6636                 break;
6637         case 0:
6638                 filter_info->src_port_mask = 1;
6639                 break;
6640         default:
6641                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
6642                 return -EINVAL;
6643         }
6644
6645         switch (filter->proto_mask) {
6646         case UINT8_MAX:
6647                 filter_info->proto_mask = 0;
6648                 filter_info->proto =
6649                         convert_protocol_type(filter->proto);
6650                 break;
6651         case 0:
6652                 filter_info->proto_mask = 1;
6653                 break;
6654         default:
6655                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
6656                 return -EINVAL;
6657         }
6658
6659         filter_info->priority = (uint8_t)filter->priority;
6660         return 0;
6661 }
6662
6663 /*
6664  * add or delete an ntuple filter
6665  *
6666  * @param
6667  * dev: Pointer to struct rte_eth_dev.
6668  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6669  * add: if true, add the filter; if false, remove it
6670  *
6671  * @return
6672  *    - On success, zero.
6673  *    - On failure, a negative value.
6674  */
6675 int
6676 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6677                         struct rte_eth_ntuple_filter *ntuple_filter,
6678                         bool add)
6679 {
6680         struct ixgbe_filter_info *filter_info =
6681                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6682         struct ixgbe_5tuple_filter_info filter_5tuple;
6683         struct ixgbe_5tuple_filter *filter;
6684         int ret;
6685
6686         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6687                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6688                 return -EINVAL;
6689         }
6690
6691         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6692         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6693         if (ret < 0)
6694                 return ret;
6695
6696         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6697                                          &filter_5tuple);
6698         if (filter != NULL && add) {
6699                 PMD_DRV_LOG(ERR, "filter exists.");
6700                 return -EEXIST;
6701         }
6702         if (filter == NULL && !add) {
6703                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6704                 return -ENOENT;
6705         }
6706
6707         if (add) {
6708                 filter = rte_zmalloc("ixgbe_5tuple_filter",
6709                                 sizeof(struct ixgbe_5tuple_filter), 0);
6710                 if (filter == NULL)
6711                         return -ENOMEM;
6712                 rte_memcpy(&filter->filter_info,
6713                                  &filter_5tuple,
6714                                  sizeof(struct ixgbe_5tuple_filter_info));
6715                 filter->queue = ntuple_filter->queue;
6716                 ret = ixgbe_add_5tuple_filter(dev, filter);
6717                 if (ret < 0) {
6718                         rte_free(filter);
6719                         return ret;
6720                 }
6721         } else
6722                 ixgbe_remove_5tuple_filter(dev, filter);
6723
6724         return 0;
6725 }
6726
6727 /*
6728  * get an ntuple filter
6729  *
6730  * @param
6731  * dev: Pointer to struct rte_eth_dev.
6732  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6733  *
6734  * @return
6735  *    - On success, zero.
6736  *    - On failure, a negative value.
6737  */
6738 static int
6739 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6740                         struct rte_eth_ntuple_filter *ntuple_filter)
6741 {
6742         struct ixgbe_filter_info *filter_info =
6743                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6744         struct ixgbe_5tuple_filter_info filter_5tuple;
6745         struct ixgbe_5tuple_filter *filter;
6746         int ret;
6747
6748         if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6749                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6750                 return -EINVAL;
6751         }
6752
6753         memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6754         ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6755         if (ret < 0)
6756                 return ret;
6757
6758         filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6759                                          &filter_5tuple);
6760         if (filter == NULL) {
6761                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
6762                 return -ENOENT;
6763         }
6764         ntuple_filter->queue = filter->queue;
6765         return 0;
6766 }
6767
6768 /*
6769  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6770  * @dev: pointer to rte_eth_dev structure
6771  * @filter_op: operation to be taken.
6772  * @arg: a pointer to specific structure corresponding to the filter_op
6773  *
6774  * @return
6775  *    - On success, zero.
6776  *    - On failure, a negative value.
6777  */
6778 static int
6779 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6780                                 enum rte_filter_op filter_op,
6781                                 void *arg)
6782 {
6783         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6784         int ret;
6785
6786         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6787
6788         if (filter_op == RTE_ETH_FILTER_NOP)
6789                 return 0;
6790
6791         if (arg == NULL) {
6792                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6793                             filter_op);
6794                 return -EINVAL;
6795         }
6796
6797         switch (filter_op) {
6798         case RTE_ETH_FILTER_ADD:
6799                 ret = ixgbe_add_del_ntuple_filter(dev,
6800                         (struct rte_eth_ntuple_filter *)arg,
6801                         TRUE);
6802                 break;
6803         case RTE_ETH_FILTER_DELETE:
6804                 ret = ixgbe_add_del_ntuple_filter(dev,
6805                         (struct rte_eth_ntuple_filter *)arg,
6806                         FALSE);
6807                 break;
6808         case RTE_ETH_FILTER_GET:
6809                 ret = ixgbe_get_ntuple_filter(dev,
6810                         (struct rte_eth_ntuple_filter *)arg);
6811                 break;
6812         default:
6813                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6814                 ret = -EINVAL;
6815                 break;
6816         }
6817         return ret;
6818 }
6819
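/*
 * Add or delete an ethertype filter by programming the ETQF/ETQS
 * register pair that matches the filter's ether_type. IPv4/IPv6
 * ether types, MAC compare and the drop action are rejected as
 * unsupported.
 */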
6820 int
6821 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6822                         struct rte_eth_ethertype_filter *filter,
6823                         bool add)
6824 {
6825         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6826         struct ixgbe_filter_info *filter_info =
6827                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6828         uint32_t etqf = 0;
6829         uint32_t etqs = 0;
6830         int ret;
6831         struct ixgbe_ethertype_filter ethertype_filter;
6832
6833         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6834                 return -EINVAL;
6835
6836         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
6837                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
6838                 PMD_DRV_LOG(ERR, "unsupported ether_type (0x%04x) in"
6839                         " ethertype filter.", filter->ether_type);
6840                 return -EINVAL;
6841         }
6842
6843         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6844                 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6845                 return -EINVAL;
6846         }
6847         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6848                 PMD_DRV_LOG(ERR, "drop option is unsupported.");
6849                 return -EINVAL;
6850         }
6851
6852         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6853         if (ret >= 0 && add) {
6854                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6855                             filter->ether_type);
6856                 return -EEXIST;
6857         }
6858         if (ret < 0 && !add) {
6859                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6860                             filter->ether_type);
6861                 return -ENOENT;
6862         }
6863
6864         if (add) {
6865                 etqf = IXGBE_ETQF_FILTER_EN;
6866                 etqf |= (uint32_t)filter->ether_type;
6867                 etqs |= (uint32_t)((filter->queue <<
6868                                     IXGBE_ETQS_RX_QUEUE_SHIFT) &
6869                                     IXGBE_ETQS_RX_QUEUE);
6870                 etqs |= IXGBE_ETQS_QUEUE_EN;
6871
6872                 ethertype_filter.ethertype = filter->ether_type;
6873                 ethertype_filter.etqf = etqf;
6874                 ethertype_filter.etqs = etqs;
6875                 ethertype_filter.conf = FALSE;
6876                 ret = ixgbe_ethertype_filter_insert(filter_info,
6877                                                     &ethertype_filter);
6878                 if (ret < 0) {
6879                         PMD_DRV_LOG(ERR, "ethertype filters are full.");
6880                         return -ENOSPC;
6881                 }
6882         } else {
6883                 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6884                 if (ret < 0)
6885                         return -ENOSYS;
6886         }
6887         IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6888         IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6889         IXGBE_WRITE_FLUSH(hw);
6890
6891         return 0;
6892 }
6893
6894 static int
6895 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6896                         struct rte_eth_ethertype_filter *filter)
6897 {
6898         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6899         struct ixgbe_filter_info *filter_info =
6900                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6901         uint32_t etqf, etqs;
6902         int ret;
6903
6904         ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6905         if (ret < 0) {
6906                 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6907                             filter->ether_type);
6908                 return -ENOENT;
6909         }
6910
6911         etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6912         if (etqf & IXGBE_ETQF_FILTER_EN) {
6913                 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6914                 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6915                 filter->flags = 0;
6916                 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6917                                IXGBE_ETQS_RX_QUEUE_SHIFT;
6918                 return 0;
6919         }
6920         return -ENOENT;
6921 }
6922
6923 /*
6924  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6925  * @dev: pointer to rte_eth_dev structure
6926  * @filter_op: operation to be taken.
6927  * @arg: a pointer to specific structure corresponding to the filter_op
6928  */
6929 static int
6930 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6931                                 enum rte_filter_op filter_op,
6932                                 void *arg)
6933 {
6934         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6935         int ret;
6936
6937         MAC_TYPE_FILTER_SUP(hw->mac.type);
6938
6939         if (filter_op == RTE_ETH_FILTER_NOP)
6940                 return 0;
6941
6942         if (arg == NULL) {
6943                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6944                             filter_op);
6945                 return -EINVAL;
6946         }
6947
6948         switch (filter_op) {
6949         case RTE_ETH_FILTER_ADD:
6950                 ret = ixgbe_add_del_ethertype_filter(dev,
6951                         (struct rte_eth_ethertype_filter *)arg,
6952                         TRUE);
6953                 break;
6954         case RTE_ETH_FILTER_DELETE:
6955                 ret = ixgbe_add_del_ethertype_filter(dev,
6956                         (struct rte_eth_ethertype_filter *)arg,
6957                         FALSE);
6958                 break;
6959         case RTE_ETH_FILTER_GET:
6960                 ret = ixgbe_get_ethertype_filter(dev,
6961                         (struct rte_eth_ethertype_filter *)arg);
6962                 break;
6963         default:
6964                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6965                 ret = -EINVAL;
6966                 break;
6967         }
6968         return ret;
6969 }
6970
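/*
 * Dispatch a filter_ctrl request to the per-filter-type handler.
 * For RTE_ETH_FILTER_GENERIC only the GET operation is supported,
 * returning the rte_flow ops of this driver.
 */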
6971 static int
6972 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6973                      enum rte_filter_type filter_type,
6974                      enum rte_filter_op filter_op,
6975                      void *arg)
6976 {
6977         int ret = 0;
6978
6979         switch (filter_type) {
6980         case RTE_ETH_FILTER_NTUPLE:
6981                 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6982                 break;
6983         case RTE_ETH_FILTER_ETHERTYPE:
6984                 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6985                 break;
6986         case RTE_ETH_FILTER_SYN:
6987                 ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6988                 break;
6989         case RTE_ETH_FILTER_FDIR:
6990                 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6991                 break;
6992         case RTE_ETH_FILTER_L2_TUNNEL:
6993                 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6994                 break;
6995         case RTE_ETH_FILTER_GENERIC:
6996                 if (filter_op != RTE_ETH_FILTER_GET)
6997                         return -EINVAL;
6998                 *(const void **)arg = &ixgbe_flow_ops;
6999                 break;
7000         default:
7001                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
7002                                                         filter_type);
7003                 ret = -EINVAL;
7004                 break;
7005         }
7006
7007         return ret;
7008 }
7009
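/*
 * Iterator handed to ixgbe_update_mc_addr_list(): returns the current
 * multicast address and advances the list pointer by one
 * rte_ether_addr. The VMDq pool is always 0 here.
 */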
7010 static u8 *
7011 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
7012                         u8 **mc_addr_ptr, u32 *vmdq)
7013 {
7014         u8 *mc_addr;
7015
7016         *vmdq = 0;
7017         mc_addr = *mc_addr_ptr;
7018         *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
7019         return mc_addr;
7020 }
7021
7022 static int
7023 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
7024                           struct rte_ether_addr *mc_addr_set,
7025                           uint32_t nb_mc_addr)
7026 {
7027         struct ixgbe_hw *hw;
7028         u8 *mc_addr_list;
7029
7030         hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7031         mc_addr_list = (u8 *)mc_addr_set;
7032         return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
7033                                          ixgbe_dev_addr_list_itr, TRUE);
7034 }
7035
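/*
 * Read the SYSTIM registers as a 64-bit value. On the X550 family
 * SYSTIML holds nanoseconds and SYSTIMH holds seconds, so the result
 * is already in ns; on older parts the two registers are the low/high
 * halves of a free-running cycle counter.
 */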
7036 static uint64_t
7037 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
7038 {
7039         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7040         uint64_t systime_cycles;
7041
7042         switch (hw->mac.type) {
7043         case ixgbe_mac_X550:
7044         case ixgbe_mac_X550EM_x:
7045         case ixgbe_mac_X550EM_a:
7046                 /* SYSTIML stores ns and SYSTIMH stores seconds. */
7047                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
7048                 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
7049                                 * NSEC_PER_SEC;
7050                 break;
7051         default:
7052                 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
7053                 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
7054                                 << 32;
7055         }
7056
7057         return systime_cycles;
7058 }
7059
7060 static uint64_t
7061 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
7062 {
7063         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7064         uint64_t rx_tstamp_cycles;
7065
7066         switch (hw->mac.type) {
7067         case ixgbe_mac_X550:
7068         case ixgbe_mac_X550EM_x:
7069         case ixgbe_mac_X550EM_a:
7070                 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
7071                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
7072                 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
7073                                 * NSEC_PER_SEC;
7074                 break;
7075         default:
7076                 /* RXSTMPL/RXSTMPH hold the low/high halves of a cycle count. */
7077                 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
7078                 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
7079                                 << 32;
7080         }
7081
7082         return rx_tstamp_cycles;
7083 }
7084
7085 static uint64_t
7086 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
7087 {
7088         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7089         uint64_t tx_tstamp_cycles;
7090
7091         switch (hw->mac.type) {
7092         case ixgbe_mac_X550:
7093         case ixgbe_mac_X550EM_x:
7094         case ixgbe_mac_X550EM_a:
7095                 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
7096                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
7097                 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
7098                                 * NSEC_PER_SEC;
7099                 break;
7100         default:
7101                 /* TXSTMPL/TXSTMPH hold the low/high halves of a cycle count. */
7102                 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
7103                 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
7104                                 << 32;
7105         }
7106
7107         return tx_tstamp_cycles;
7108 }
7109
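/*
 * Initialize the systime/RX/TX timecounters. TIMINCA is programmed
 * with a link-speed-dependent increment so that SYSTIM tracks wall
 * time; cc_shift then converts counter units to ns in software.
 * Rough example (illustrative, not normative): at 10G the counter
 * gains IXGBE_INCVAL_10GB (0x66666666) per tick, i.e. ~6.4 ns in
 * 2^-28 ns units, consistent with a ~156.25 MHz clock. X550 parts
 * count ns directly, so incval is 1 and shift is 0.
 */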
7110 static void
7111 ixgbe_start_timecounters(struct rte_eth_dev *dev)
7112 {
7113         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7114         struct ixgbe_adapter *adapter = dev->data->dev_private;
7115         struct rte_eth_link link;
7116         uint32_t incval = 0;
7117         uint32_t shift = 0;
7118
7119         /* Get current link speed. */
7120         ixgbe_dev_link_update(dev, 1);
7121         rte_eth_linkstatus_get(dev, &link);
7122
7123         switch (link.link_speed) {
7124         case ETH_SPEED_NUM_100M:
7125                 incval = IXGBE_INCVAL_100;
7126                 shift = IXGBE_INCVAL_SHIFT_100;
7127                 break;
7128         case ETH_SPEED_NUM_1G:
7129                 incval = IXGBE_INCVAL_1GB;
7130                 shift = IXGBE_INCVAL_SHIFT_1GB;
7131                 break;
7132         case ETH_SPEED_NUM_10G:
7133         default:
7134                 incval = IXGBE_INCVAL_10GB;
7135                 shift = IXGBE_INCVAL_SHIFT_10GB;
7136                 break;
7137         }
7138
7139         switch (hw->mac.type) {
7140         case ixgbe_mac_X550:
7141         case ixgbe_mac_X550EM_x:
7142         case ixgbe_mac_X550EM_a:
7143                 /* Independent of link speed. */
7144                 incval = 1;
7145                 /* Cycles read will be interpreted as ns. */
7146                 shift = 0;
7147                 /* Fall-through */
7148         case ixgbe_mac_X540:
7149                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
7150                 break;
7151         case ixgbe_mac_82599EB:
7152                 incval >>= IXGBE_INCVAL_SHIFT_82599;
7153                 shift -= IXGBE_INCVAL_SHIFT_82599;
7154                 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
7155                                 (1 << IXGBE_INCPER_SHIFT_82599) | incval);
7156                 break;
7157         default:
7158                 /* Not supported. */
7159                 return;
7160         }
7161
7162         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
7163         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7164         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7165
7166         adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7167         adapter->systime_tc.cc_shift = shift;
7168         adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
7169
7170         adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7171         adapter->rx_tstamp_tc.cc_shift = shift;
7172         adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7173
7174         adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7175         adapter->tx_tstamp_tc.cc_shift = shift;
7176         adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7177 }
7178
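/*
 * The adjust/write/read helpers below work on the software
 * timecounters only: adjust and write change the accumulated nsec
 * state, and read combines that state with a fresh SYSTIM sample.
 * The hardware SYSTIM registers themselves are not rewritten here.
 */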
7179 static int
7180 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
7181 {
7182         struct ixgbe_adapter *adapter = dev->data->dev_private;
7183
7184         adapter->systime_tc.nsec += delta;
7185         adapter->rx_tstamp_tc.nsec += delta;
7186         adapter->tx_tstamp_tc.nsec += delta;
7187
7188         return 0;
7189 }
7190
7191 static int
7192 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
7193 {
7194         uint64_t ns;
7195         struct ixgbe_adapter *adapter = dev->data->dev_private;
7196
7197         ns = rte_timespec_to_ns(ts);
7198         /* Set the timecounters to a new value. */
7199         adapter->systime_tc.nsec = ns;
7200         adapter->rx_tstamp_tc.nsec = ns;
7201         adapter->tx_tstamp_tc.nsec = ns;
7202
7203         return 0;
7204 }
7205
7206 static int
7207 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
7208 {
7209         uint64_t ns, systime_cycles;
7210         struct ixgbe_adapter *adapter = dev->data->dev_private;
7211
7212         systime_cycles = ixgbe_read_systime_cyclecounter(dev);
7213         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
7214         *ts = rte_ns_to_timespec(ns);
7215
7216         return 0;
7217 }
7218
7219 static int
7220 ixgbe_timesync_enable(struct rte_eth_dev *dev)
7221 {
7222         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7223         uint32_t tsync_ctl;
7224         uint32_t tsauxc;
7225
7226         /* Stop the timesync system time. */
7227         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
7228         /* Reset the timesync system time value. */
7229         IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
7230         IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
7231
7232         /* Enable system time for platforms where it isn't on by default. */
7233         tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
7234         tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
7235         IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
7236
7237         ixgbe_start_timecounters(dev);
7238
7239         /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7240         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
7241                         (RTE_ETHER_TYPE_1588 |
7242                          IXGBE_ETQF_FILTER_EN |
7243                          IXGBE_ETQF_1588));
7244
7245         /* Enable timestamping of received PTP packets. */
7246         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7247         tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
7248         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7249
7250         /* Enable timestamping of transmitted PTP packets. */
7251         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7252         tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
7253         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7254
7255         IXGBE_WRITE_FLUSH(hw);
7256
7257         return 0;
7258 }
7259
7260 static int
7261 ixgbe_timesync_disable(struct rte_eth_dev *dev)
7262 {
7263         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7264         uint32_t tsync_ctl;
7265
7266         /* Disable timestamping of transmitted PTP packets. */
7267         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7268         tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
7269         IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7270
7271         /* Disable timestamping of received PTP packets. */
7272         tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7273         tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
7274         IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7275
7276         /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7277         IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
7278
7279         /* Stop incrementing the System Time registers. */
7280         IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
7281
7282         return 0;
7283 }
7284
7285 static int
7286 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
7287                                  struct timespec *timestamp,
7288                                  uint32_t flags __rte_unused)
7289 {
7290         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7291         struct ixgbe_adapter *adapter = dev->data->dev_private;
7292         uint32_t tsync_rxctl;
7293         uint64_t rx_tstamp_cycles;
7294         uint64_t ns;
7295
7296         tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7297         if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
7298                 return -EINVAL;
7299
7300         rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
7301         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
7302         *timestamp = rte_ns_to_timespec(ns);
7303
7304         return 0;
7305 }
7306
7307 static int
7308 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7309                                  struct timespec *timestamp)
7310 {
7311         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7312         struct ixgbe_adapter *adapter = dev->data->dev_private;
7313         uint32_t tsync_txctl;
7314         uint64_t tx_tstamp_cycles;
7315         uint64_t ns;
7316
7317         tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7318         if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7319                 return -EINVAL;
7320
7321         tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7322         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7323         *timestamp = rte_ns_to_timespec(ns);
7324
7325         return 0;
7326 }
7327
7328 static int
7329 ixgbe_get_reg_length(struct rte_eth_dev *dev)
7330 {
7331         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7332         int count = 0;
7333         int g_ind = 0;
7334         const struct reg_info *reg_group;
7335         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7336                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7337
7338         while ((reg_group = reg_set[g_ind++]))
7339                 count += ixgbe_regs_group_count(reg_group);
7340
7341         return count;
7342 }
7343
7344 static int
7345 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7346 {
7347         int count = 0;
7348         int g_ind = 0;
7349         const struct reg_info *reg_group;
7350
7351         while ((reg_group = ixgbevf_regs[g_ind++]))
7352                 count += ixgbe_regs_group_count(reg_group);
7353
7354         return count;
7355 }
7356
7357 static int
7358 ixgbe_get_regs(struct rte_eth_dev *dev,
7359               struct rte_dev_reg_info *regs)
7360 {
7361         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7362         uint32_t *data = regs->data;
7363         int g_ind = 0;
7364         int count = 0;
7365         const struct reg_info *reg_group;
7366         const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7367                                     ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7368
7369         if (data == NULL) {
7370                 regs->length = ixgbe_get_reg_length(dev);
7371                 regs->width = sizeof(uint32_t);
7372                 return 0;
7373         }
7374
7375         /* Support only full register dump */
7376         if ((regs->length == 0) ||
7377             (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
7378                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7379                         hw->device_id;
7380                 while ((reg_group = reg_set[g_ind++]))
7381                         count += ixgbe_read_regs_group(dev, &data[count],
7382                                 reg_group);
7383                 return 0;
7384         }
7385
7386         return -ENOTSUP;
7387 }
7388
7389 static int
7390 ixgbevf_get_regs(struct rte_eth_dev *dev,
7391                 struct rte_dev_reg_info *regs)
7392 {
7393         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7394         uint32_t *data = regs->data;
7395         int g_ind = 0;
7396         int count = 0;
7397         const struct reg_info *reg_group;
7398
7399         if (data == NULL) {
7400                 regs->length = ixgbevf_get_reg_length(dev);
7401                 regs->width = sizeof(uint32_t);
7402                 return 0;
7403         }
7404
7405         /* Support only full register dump */
7406         if ((regs->length == 0) ||
7407             (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7408                 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7409                         hw->device_id;
7410                 while ((reg_group = ixgbevf_regs[g_ind++]))
7411                         count += ixgbe_read_regs_group(dev, &data[count],
7412                                                       reg_group);
7413                 return 0;
7414         }
7415
7416         return -ENOTSUP;
7417 }
7418
7419 static int
7420 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7421 {
7422         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7423
7424         /* Return unit is byte count */
7425         return hw->eeprom.word_size * 2;
7426 }
7427
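/*
 * EEPROM access is word (16-bit) based: the byte offset/length from
 * rte_dev_eeprom_info are halved into word units before the bounds
 * check against hw->eeprom.word_size and the buffered read/write.
 */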
7428 static int
7429 ixgbe_get_eeprom(struct rte_eth_dev *dev,
7430                 struct rte_dev_eeprom_info *in_eeprom)
7431 {
7432         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7433         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7434         uint16_t *data = in_eeprom->data;
7435         int first, length;
7436
7437         first = in_eeprom->offset >> 1;
7438         length = in_eeprom->length >> 1;
7439         if ((first > hw->eeprom.word_size) ||
7440             ((first + length) > hw->eeprom.word_size))
7441                 return -EINVAL;
7442
7443         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7444
7445         return eeprom->ops.read_buffer(hw, first, length, data);
7446 }
7447
7448 static int
7449 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7450                 struct rte_dev_eeprom_info *in_eeprom)
7451 {
7452         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7453         struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7454         uint16_t *data = in_eeprom->data;
7455         int first, length;
7456
7457         first = in_eeprom->offset >> 1;
7458         length = in_eeprom->length >> 1;
7459         if ((first > hw->eeprom.word_size) ||
7460             ((first + length) > hw->eeprom.word_size))
7461                 return -EINVAL;
7462
7463         in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7464
7465         return eeprom->ops.write_buffer(hw, first, length, data);
7466 }
7467
7468 static int
7469 ixgbe_get_module_info(struct rte_eth_dev *dev,
7470                       struct rte_eth_dev_module_info *modinfo)
7471 {
7472         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7473         uint32_t status;
7474         uint8_t sff8472_rev, addr_mode;
7475         bool page_swap = false;
7476
7477         /* Check whether we support SFF-8472 or not */
7478         status = hw->phy.ops.read_i2c_eeprom(hw,
7479                                              IXGBE_SFF_SFF_8472_COMP,
7480                                              &sff8472_rev);
7481         if (status != 0)
7482                 return -EIO;
7483
7484         /* Check the addressing mode; an address change is not supported. */
7485         status = hw->phy.ops.read_i2c_eeprom(hw,
7486                                              IXGBE_SFF_SFF_8472_SWAP,
7487                                              &addr_mode);
7488         if (status != 0)
7489                 return -EIO;
7490
7491         if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
7492                 PMD_DRV_LOG(ERR,
7493                             "Address change required to access page 0xA2, "
7494                             "but not supported. Please report the module "
7495                             "type to the driver maintainers.");
7496                 page_swap = true;
7497         }
7498
7499         if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
7500                 /* We have an SFP, but it does not support SFF-8472 */
7501                 modinfo->type = RTE_ETH_MODULE_SFF_8079;
7502                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
7503         } else {
7504                 /* We have an SFP which supports a revision of SFF-8472. */
7505                 modinfo->type = RTE_ETH_MODULE_SFF_8472;
7506                 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
7507         }
7508
7509         return 0;
7510 }
7511
7512 static int
7513 ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
7514                         struct rte_dev_eeprom_info *info)
7515 {
7516         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7517         uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
7518         uint8_t databyte = 0xFF;
7519         uint8_t *data = info->data;
7520         uint32_t i = 0;
7521
7522         if (info->length == 0)
7523                 return -EINVAL;
7524
7525         for (i = info->offset; i < info->offset + info->length; i++) {
7526                 if (i < RTE_ETH_MODULE_SFF_8079_LEN)
7527                         status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
7528                 else
7529                         status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
7530
7531                 if (status != 0)
7532                         return -EIO;
7533
7534                 data[i - info->offset] = databyte;
7535         }
7536
7537         return 0;
7538 }
7539
7540 uint16_t
7541 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type)
{
7542         switch (mac_type) {
7543         case ixgbe_mac_X550:
7544         case ixgbe_mac_X550EM_x:
7545         case ixgbe_mac_X550EM_a:
7546                 return ETH_RSS_RETA_SIZE_512;
7547         case ixgbe_mac_X550_vf:
7548         case ixgbe_mac_X550EM_x_vf:
7549         case ixgbe_mac_X550EM_a_vf:
7550                 return ETH_RSS_RETA_SIZE_64;
7551         case ixgbe_mac_X540_vf:
7552         case ixgbe_mac_82599_vf:
7553                 return 0;
7554         default:
7555                 return ETH_RSS_RETA_SIZE_128;
7556         }
7557 }
7558
7559 uint32_t
7560 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx)
{
7561         switch (mac_type) {
7562         case ixgbe_mac_X550:
7563         case ixgbe_mac_X550EM_x:
7564         case ixgbe_mac_X550EM_a:
7565                 if (reta_idx < ETH_RSS_RETA_SIZE_128)
7566                         return IXGBE_RETA(reta_idx >> 2);
7567                 else
7568                         return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7569         case ixgbe_mac_X550_vf:
7570         case ixgbe_mac_X550EM_x_vf:
7571         case ixgbe_mac_X550EM_a_vf:
7572                 return IXGBE_VFRETA(reta_idx >> 2);
7573         default:
7574                 return IXGBE_RETA(reta_idx >> 2);
7575         }
7576 }
7577
7578 uint32_t
7579 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type)
{
7580         switch (mac_type) {
7581         case ixgbe_mac_X550_vf:
7582         case ixgbe_mac_X550EM_x_vf:
7583         case ixgbe_mac_X550EM_a_vf:
7584                 return IXGBE_VFMRQC;
7585         default:
7586                 return IXGBE_MRQC;
7587         }
7588 }
7589
7590 uint32_t
7591 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i)
{
7592         switch (mac_type) {
7593         case ixgbe_mac_X550_vf:
7594         case ixgbe_mac_X550EM_x_vf:
7595         case ixgbe_mac_X550EM_a_vf:
7596                 return IXGBE_VFRSSRK(i);
7597         default:
7598                 return IXGBE_RSSRK(i);
7599         }
7600 }
7601
7602 bool
7603 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type)
{
7604         switch (mac_type) {
7605         case ixgbe_mac_82599_vf:
7606         case ixgbe_mac_X540_vf:
7607                 return 0;
7608         default:
7609                 return 1;
7610         }
7611 }
7612
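/*
 * Fill rte_eth_dcb_info from the current DCB configuration. The
 * TC-to-queue mapping depends on whether VMDq/SR-IOV is active;
 * the fixed base/nb_queue tables below follow the hardware's DCB
 * queue assignment for the 4-TC and 8-TC modes.
 */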
7613 static int
7614 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7615                         struct rte_eth_dcb_info *dcb_info)
7616 {
7617         struct ixgbe_dcb_config *dcb_config =
7618                         IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7619         struct ixgbe_dcb_tc_config *tc;
7620         struct rte_eth_dcb_tc_queue_mapping *tc_queue;
7621         uint8_t nb_tcs;
7622         uint8_t i, j;
7623
7624         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7625                 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7626         else
7627                 dcb_info->nb_tcs = 1;
7628
7629         tc_queue = &dcb_info->tc_queue;
7630         nb_tcs = dcb_info->nb_tcs;
7631
7632         if (dcb_config->vt_mode) { /* vt is enabled*/
7633                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7634                                 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7635                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7636                         dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7637                 if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
7638                         for (j = 0; j < nb_tcs; j++) {
7639                                 tc_queue->tc_rxq[0][j].base = j;
7640                                 tc_queue->tc_rxq[0][j].nb_queue = 1;
7641                                 tc_queue->tc_txq[0][j].base = j;
7642                                 tc_queue->tc_txq[0][j].nb_queue = 1;
7643                         }
7644                 } else {
7645                         for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7646                                 for (j = 0; j < nb_tcs; j++) {
7647                                         tc_queue->tc_rxq[i][j].base =
7648                                                 i * nb_tcs + j;
7649                                         tc_queue->tc_rxq[i][j].nb_queue = 1;
7650                                         tc_queue->tc_txq[i][j].base =
7651                                                 i * nb_tcs + j;
7652                                         tc_queue->tc_txq[i][j].nb_queue = 1;
7653                                 }
7654                         }
7655                 }
7656         } else { /* vt is disabled*/
7657                 struct rte_eth_dcb_rx_conf *rx_conf =
7658                                 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7659                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7660                         dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7661                 if (dcb_info->nb_tcs == ETH_4_TCS) {
7662                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7663                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7664                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7665                         }
7666                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7667                         dcb_info->tc_queue.tc_txq[0][1].base = 64;
7668                         dcb_info->tc_queue.tc_txq[0][2].base = 96;
7669                         dcb_info->tc_queue.tc_txq[0][3].base = 112;
7670                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7671                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7672                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7673                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7674                 } else if (dcb_info->nb_tcs == ETH_8_TCS) {
7675                         for (i = 0; i < dcb_info->nb_tcs; i++) {
7676                                 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7677                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7678                         }
7679                         dcb_info->tc_queue.tc_txq[0][0].base = 0;
7680                         dcb_info->tc_queue.tc_txq[0][1].base = 32;
7681                         dcb_info->tc_queue.tc_txq[0][2].base = 64;
7682                         dcb_info->tc_queue.tc_txq[0][3].base = 80;
7683                         dcb_info->tc_queue.tc_txq[0][4].base = 96;
7684                         dcb_info->tc_queue.tc_txq[0][5].base = 104;
7685                         dcb_info->tc_queue.tc_txq[0][6].base = 112;
7686                         dcb_info->tc_queue.tc_txq[0][7].base = 120;
7687                         dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7688                         dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7689                         dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7690                         dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7691                         dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7692                         dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7693                         dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7694                         dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7695                 }
7696         }
7697         for (i = 0; i < dcb_info->nb_tcs; i++) {
7698                 tc = &dcb_config->tc_config[i];
7699                 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7700         }
7701         return 0;
7702 }
7703
7704 /* Update e-tag ether type */
7705 static int
7706 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7707                             uint16_t ether_type)
7708 {
7709         uint32_t etag_etype;
7710
7711         if (hw->mac.type != ixgbe_mac_X550 &&
7712             hw->mac.type != ixgbe_mac_X550EM_x &&
7713             hw->mac.type != ixgbe_mac_X550EM_a) {
7714                 return -ENOTSUP;
7715         }
7716
7717         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7718         etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7719         etag_etype |= ether_type;
7720         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7721         IXGBE_WRITE_FLUSH(hw);
7722
7723         return 0;
7724 }
7725
7726 /* Config l2 tunnel ether type */
7727 static int
7728 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7729                                   struct rte_eth_l2_tunnel_conf *l2_tunnel)
7730 {
7731         int ret = 0;
7732         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7733         struct ixgbe_l2_tn_info *l2_tn_info =
7734                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7735
7736         if (l2_tunnel == NULL)
7737                 return -EINVAL;
7738
7739         switch (l2_tunnel->l2_tunnel_type) {
7740         case RTE_L2_TUNNEL_TYPE_E_TAG:
7741                 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7742                 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7743                 break;
7744         default:
7745                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7746                 ret = -EINVAL;
7747                 break;
7748         }
7749
7750         return ret;
7751 }
7752
7753 /* Enable e-tag tunnel */
7754 static int
7755 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7756 {
7757         uint32_t etag_etype;
7758
7759         if (hw->mac.type != ixgbe_mac_X550 &&
7760             hw->mac.type != ixgbe_mac_X550EM_x &&
7761             hw->mac.type != ixgbe_mac_X550EM_a) {
7762                 return -ENOTSUP;
7763         }
7764
7765         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7766         etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7767         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7768         IXGBE_WRITE_FLUSH(hw);
7769
7770         return 0;
7771 }
7772
7773 /* Enable l2 tunnel */
7774 static int
7775 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7776                            enum rte_eth_tunnel_type l2_tunnel_type)
7777 {
7778         int ret = 0;
7779         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7780         struct ixgbe_l2_tn_info *l2_tn_info =
7781                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7782
7783         switch (l2_tunnel_type) {
7784         case RTE_L2_TUNNEL_TYPE_E_TAG:
7785                 l2_tn_info->e_tag_en = TRUE;
7786                 ret = ixgbe_e_tag_enable(hw);
7787                 break;
7788         default:
7789                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7790                 ret = -EINVAL;
7791                 break;
7792         }
7793
7794         return ret;
7795 }
7796
7797 /* Disable e-tag tunnel */
7798 static int
7799 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7800 {
7801         uint32_t etag_etype;
7802
7803         if (hw->mac.type != ixgbe_mac_X550 &&
7804             hw->mac.type != ixgbe_mac_X550EM_x &&
7805             hw->mac.type != ixgbe_mac_X550EM_a) {
7806                 return -ENOTSUP;
7807         }
7808
7809         etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7810         etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7811         IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7812         IXGBE_WRITE_FLUSH(hw);
7813
7814         return 0;
7815 }
7816
7817 /* Disable l2 tunnel */
7818 static int
7819 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7820                             enum rte_eth_tunnel_type l2_tunnel_type)
7821 {
7822         int ret = 0;
7823         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7824         struct ixgbe_l2_tn_info *l2_tn_info =
7825                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7826
7827         switch (l2_tunnel_type) {
7828         case RTE_L2_TUNNEL_TYPE_E_TAG:
7829                 l2_tn_info->e_tag_en = FALSE;
7830                 ret = ixgbe_e_tag_disable(hw);
7831                 break;
7832         default:
7833                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7834                 ret = -EINVAL;
7835                 break;
7836         }
7837
7838         return ret;
7839 }
7840
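/*
 * E-tag filters are stored in receive address (RAR) entries: RAL holds
 * the E-tag ID and RAH is flagged with IXGBE_RAH_ADTYPE. Entry 0 is
 * skipped because it holds the port MAC address.
 */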
7841 static int
7842 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7843                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7844 {
7845         int ret = 0;
7846         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7847         uint32_t i, rar_entries;
7848         uint32_t rar_low, rar_high;
7849
7850         if (hw->mac.type != ixgbe_mac_X550 &&
7851             hw->mac.type != ixgbe_mac_X550EM_x &&
7852             hw->mac.type != ixgbe_mac_X550EM_a) {
7853                 return -ENOTSUP;
7854         }
7855
7856         rar_entries = ixgbe_get_num_rx_addrs(hw);
7857
7858         for (i = 1; i < rar_entries; i++) {
7859                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7860                 rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7861                 if ((rar_high & IXGBE_RAH_AV) &&
7862                     (rar_high & IXGBE_RAH_ADTYPE) &&
7863                     ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7864                      l2_tunnel->tunnel_id)) {
7865                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7866                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7867
7868                         ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7869
7870                         return ret;
7871                 }
7872         }
7873
7874         return ret;
7875 }
7876
7877 static int
7878 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7879                        struct rte_eth_l2_tunnel_conf *l2_tunnel)
7880 {
7881         int ret = 0;
7882         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7883         uint32_t i, rar_entries;
7884         uint32_t rar_low, rar_high;
7885
7886         if (hw->mac.type != ixgbe_mac_X550 &&
7887             hw->mac.type != ixgbe_mac_X550EM_x &&
7888             hw->mac.type != ixgbe_mac_X550EM_a) {
7889                 return -ENOTSUP;
7890         }
7891
7892         /* One entry per tunnel. Try to remove a potentially existing entry. */
7893         ixgbe_e_tag_filter_del(dev, l2_tunnel);
7894
7895         rar_entries = ixgbe_get_num_rx_addrs(hw);
7896
7897         for (i = 1; i < rar_entries; i++) {
7898                 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7899                 if (rar_high & IXGBE_RAH_AV) {
7900                         continue;
7901                 } else {
7902                         ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7903                         rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7904                         rar_low = l2_tunnel->tunnel_id;
7905
7906                         IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7907                         IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7908
7909                         return ret;
7910                 }
7911         }
7912
7913         PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
7914                      " Please remove a rule before adding a new one.");
7915         return -EINVAL;
7916 }
7917
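/*
 * The L2 tunnel filter database is an rte_hash keyed by
 * ixgbe_l2_tn_key; the key index returned by the hash is used as the
 * slot into l2_tn_info->hash_map, which stores the filter pointers,
 * plus a TAILQ for iteration.
 */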
7918 static inline struct ixgbe_l2_tn_filter *
7919 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7920                           struct ixgbe_l2_tn_key *key)
7921 {
7922         int ret;
7923
7924         ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7925         if (ret < 0)
7926                 return NULL;
7927
7928         return l2_tn_info->hash_map[ret];
7929 }
7930
7931 static inline int
7932 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7933                           struct ixgbe_l2_tn_filter *l2_tn_filter)
7934 {
7935         int ret;
7936
7937         ret = rte_hash_add_key(l2_tn_info->hash_handle,
7938                                &l2_tn_filter->key);
7939
7940         if (ret < 0) {
7941                 PMD_DRV_LOG(ERR,
7942                             "Failed to insert L2 tunnel filter"
7943                             " into hash table: %d!",
7944                             ret);
7945                 return ret;
7946         }
7947
7948         l2_tn_info->hash_map[ret] = l2_tn_filter;
7949
7950         TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7951
7952         return 0;
7953 }
7954
7955 static inline int
7956 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7957                           struct ixgbe_l2_tn_key *key)
7958 {
7959         int ret;
7960         struct ixgbe_l2_tn_filter *l2_tn_filter;
7961
7962         ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7963
7964         if (ret < 0) {
7965                 PMD_DRV_LOG(ERR,
7966                             "No such L2 tunnel filter to delete: %d!",
7967                             ret);
7968                 return ret;
7969         }
7970
7971         l2_tn_filter = l2_tn_info->hash_map[ret];
7972         l2_tn_info->hash_map[ret] = NULL;
7973
7974         TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7975         rte_free(l2_tn_filter);
7976
7977         return 0;
7978 }
7979
7980 /* Add l2 tunnel filter */
7981 int
7982 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7983                                struct rte_eth_l2_tunnel_conf *l2_tunnel,
7984                                bool restore)
7985 {
7986         int ret;
7987         struct ixgbe_l2_tn_info *l2_tn_info =
7988                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7989         struct ixgbe_l2_tn_key key;
7990         struct ixgbe_l2_tn_filter *node;
7991
7992         if (!restore) {
7993                 key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7994                 key.tn_id = l2_tunnel->tunnel_id;
7995
7996                 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7997
7998                 if (node) {
7999                         PMD_DRV_LOG(ERR,
8000                                     "The L2 tunnel filter already exists!");
8001                         return -EINVAL;
8002                 }
8003
8004                 node = rte_zmalloc("ixgbe_l2_tn",
8005                                    sizeof(struct ixgbe_l2_tn_filter),
8006                                    0);
8007                 if (!node)
8008                         return -ENOMEM;
8009
8010                 rte_memcpy(&node->key,
8011                                  &key,
8012                                  sizeof(struct ixgbe_l2_tn_key));
8013                 node->pool = l2_tunnel->pool;
8014                 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
8015                 if (ret < 0) {
8016                         rte_free(node);
8017                         return ret;
8018                 }
8019         }
8020
8021         switch (l2_tunnel->l2_tunnel_type) {
8022         case RTE_L2_TUNNEL_TYPE_E_TAG:
8023                 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
8024                 break;
8025         default:
8026                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8027                 ret = -EINVAL;
8028                 break;
8029         }
8030
8031         if ((!restore) && (ret < 0))
8032                 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
8033
8034         return ret;
8035 }
8036
8037 /* Delete l2 tunnel filter */
8038 int
8039 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
8040                                struct rte_eth_l2_tunnel_conf *l2_tunnel)
8041 {
8042         int ret;
8043         struct ixgbe_l2_tn_info *l2_tn_info =
8044                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8045         struct ixgbe_l2_tn_key key;
8046
8047         key.l2_tn_type = l2_tunnel->l2_tunnel_type;
8048         key.tn_id = l2_tunnel->tunnel_id;
8049         ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
8050         if (ret < 0)
8051                 return ret;
8052
8053         switch (l2_tunnel->l2_tunnel_type) {
8054         case RTE_L2_TUNNEL_TYPE_E_TAG:
8055                 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
8056                 break;
8057         default:
8058                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8059                 ret = -EINVAL;
8060                 break;
8061         }
8062
8063         return ret;
8064 }
8065
8066 /**
8067  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
8068  * @dev: pointer to rte_eth_dev structure
8069  * @filter_op: operation to be taken.
8070  * @arg: a pointer to specific structure corresponding to the filter_op
8071  */
8072 static int
8073 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
8074                                   enum rte_filter_op filter_op,
8075                                   void *arg)
8076 {
8077         int ret;
8078
8079         if (filter_op == RTE_ETH_FILTER_NOP)
8080                 return 0;
8081
8082         if (arg == NULL) {
8083                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
8084                             filter_op);
8085                 return -EINVAL;
8086         }
8087
8088         switch (filter_op) {
8089         case RTE_ETH_FILTER_ADD:
8090                 ret = ixgbe_dev_l2_tunnel_filter_add
8091                         (dev,
8092                          (struct rte_eth_l2_tunnel_conf *)arg,
8093                          FALSE);
8094                 break;
8095         case RTE_ETH_FILTER_DELETE:
8096                 ret = ixgbe_dev_l2_tunnel_filter_del
8097                         (dev,
8098                          (struct rte_eth_l2_tunnel_conf *)arg);
8099                 break;
8100         default:
8101                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
8102                 ret = -EINVAL;
8103                 break;
8104         }
8105         return ret;
8106 }
8107
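/*
 * Toggle E-tag based pool forwarding via the VT_CTL pooling-mode
 * field (supported on the X550 family only).
 */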
8108 static int
8109 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
8110 {
8111         int ret = 0;
8112         uint32_t ctrl;
8113         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8114
8115         if (hw->mac.type != ixgbe_mac_X550 &&
8116             hw->mac.type != ixgbe_mac_X550EM_x &&
8117             hw->mac.type != ixgbe_mac_X550EM_a) {
8118                 return -ENOTSUP;
8119         }
8120
8121         ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
8122         ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
8123         if (en)
8124                 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
8125         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
8126
8127         return ret;
8128 }
8129
8130 /* Enable l2 tunnel forwarding */
8131 static int
8132 ixgbe_dev_l2_tunnel_forwarding_enable
8133         (struct rte_eth_dev *dev,
8134          enum rte_eth_tunnel_type l2_tunnel_type)
8135 {
8136         struct ixgbe_l2_tn_info *l2_tn_info =
8137                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8138         int ret = 0;
8139
8140         switch (l2_tunnel_type) {
8141         case RTE_L2_TUNNEL_TYPE_E_TAG:
8142                 l2_tn_info->e_tag_fwd_en = TRUE;
8143                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
8144                 break;
8145         default:
8146                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8147                 ret = -EINVAL;
8148                 break;
8149         }
8150
8151         return ret;
8152 }
8153
8154 /* Disable l2 tunnel forwarding */
8155 static int
8156 ixgbe_dev_l2_tunnel_forwarding_disable
8157         (struct rte_eth_dev *dev,
8158          enum rte_eth_tunnel_type l2_tunnel_type)
8159 {
8160         struct ixgbe_l2_tn_info *l2_tn_info =
8161                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8162         int ret = 0;
8163
8164         switch (l2_tunnel_type) {
8165         case RTE_L2_TUNNEL_TYPE_E_TAG:
8166                 l2_tn_info->e_tag_fwd_en = FALSE;
8167                 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
8168                 break;
8169         default:
8170                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8171                 ret = -EINVAL;
8172                 break;
8173         }
8174
8175         return ret;
8176 }
8177
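/* Program per-VF E-tag insertion: VMTIR holds the tag to be inserted
 * and the VMVIR tagging action selects E-tag insertion.
 */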
8178 static int
8179 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
8180                              struct rte_eth_l2_tunnel_conf *l2_tunnel,
8181                              bool en)
8182 {
8183         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
8184         int ret = 0;
8185         uint32_t vmtir, vmvir;
8186         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8187
8188         if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
8189                 PMD_DRV_LOG(ERR,
8190                             "VF id %u should be less than %u",
8191                             l2_tunnel->vf_id,
8192                             pci_dev->max_vfs);
8193                 return -EINVAL;
8194         }
8195
8196         if (hw->mac.type != ixgbe_mac_X550 &&
8197             hw->mac.type != ixgbe_mac_X550EM_x &&
8198             hw->mac.type != ixgbe_mac_X550EM_a) {
8199                 return -ENOTSUP;
8200         }
8201
8202         if (en)
8203                 vmtir = l2_tunnel->tunnel_id;
8204         else
8205                 vmtir = 0;
8206
8207         IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
8208
8209         vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
8210         vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
8211         if (en)
8212                 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
8213         IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
8214
8215         return ret;
8216 }
8217
8218 /* Enable l2 tunnel tag insertion */
8219 static int
8220 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
8221                                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
8222 {
8223         int ret = 0;
8224
8225         switch (l2_tunnel->l2_tunnel_type) {
8226         case RTE_L2_TUNNEL_TYPE_E_TAG:
8227                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
8228                 break;
8229         default:
8230                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8231                 ret = -EINVAL;
8232                 break;
8233         }
8234
8235         return ret;
8236 }
8237
8238 /* Disable l2 tunnel tag insertion */
8239 static int
8240 ixgbe_dev_l2_tunnel_insertion_disable
8241         (struct rte_eth_dev *dev,
8242          struct rte_eth_l2_tunnel_conf *l2_tunnel)
8243 {
8244         int ret = 0;
8245
8246         switch (l2_tunnel->l2_tunnel_type) {
8247         case RTE_L2_TUNNEL_TYPE_E_TAG:
8248                 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
8249                 break;
8250         default:
8251                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8252                 ret = -EINVAL;
8253                 break;
8254         }
8255
8256         return ret;
8257 }
8258
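/* Toggle E-tag stripping on receive via the IXGBE_QDE_STRIP_TAG bit
 * of the QDE register, on X550-family MACs only.
 */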
8259 static int
8260 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
8261                              bool en)
8262 {
8263         int ret = 0;
8264         uint32_t qde;
8265         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8266
8267         if (hw->mac.type != ixgbe_mac_X550 &&
8268             hw->mac.type != ixgbe_mac_X550EM_x &&
8269             hw->mac.type != ixgbe_mac_X550EM_a) {
8270                 return -ENOTSUP;
8271         }
8272
8273         qde = IXGBE_READ_REG(hw, IXGBE_QDE);
8274         if (en)
8275                 qde |= IXGBE_QDE_STRIP_TAG;
8276         else
8277                 qde &= ~IXGBE_QDE_STRIP_TAG;
8278         qde &= ~IXGBE_QDE_READ;
8279         qde |= IXGBE_QDE_WRITE;
8280         IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
8281
8282         return ret;
8283 }
8284
8285 /* Enable l2 tunnel tag stripping */
8286 static int
8287 ixgbe_dev_l2_tunnel_stripping_enable
8288         (struct rte_eth_dev *dev,
8289          enum rte_eth_tunnel_type l2_tunnel_type)
8290 {
8291         int ret = 0;
8292
8293         switch (l2_tunnel_type) {
8294         case RTE_L2_TUNNEL_TYPE_E_TAG:
8295                 ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
8296                 break;
8297         default:
8298                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8299                 ret = -EINVAL;
8300                 break;
8301         }
8302
8303         return ret;
8304 }
8305
8306 /* Disable l2 tunnel tag stripping */
8307 static int
8308 ixgbe_dev_l2_tunnel_stripping_disable
8309         (struct rte_eth_dev *dev,
8310          enum rte_eth_tunnel_type l2_tunnel_type)
8311 {
8312         int ret = 0;
8313
8314         switch (l2_tunnel_type) {
8315         case RTE_L2_TUNNEL_TYPE_E_TAG:
8316                 ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
8317                 break;
8318         default:
8319                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8320                 ret = -EINVAL;
8321                 break;
8322         }
8323
8324         return ret;
8325 }
8326
8327 /* Enable/disable l2 tunnel offload functions */
8328 static int
8329 ixgbe_dev_l2_tunnel_offload_set
8330         (struct rte_eth_dev *dev,
8331          struct rte_eth_l2_tunnel_conf *l2_tunnel,
8332          uint32_t mask,
8333          uint8_t en)
8334 {
8335         int ret = 0;
8336
8337         if (l2_tunnel == NULL)
8338                 return -EINVAL;
8339
8340         ret = -EINVAL;
8341         if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
8342                 if (en)
8343                         ret = ixgbe_dev_l2_tunnel_enable(
8344                                 dev,
8345                                 l2_tunnel->l2_tunnel_type);
8346                 else
8347                         ret = ixgbe_dev_l2_tunnel_disable(
8348                                 dev,
8349                                 l2_tunnel->l2_tunnel_type);
8350         }
8351
8352         if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
8353                 if (en)
8354                         ret = ixgbe_dev_l2_tunnel_insertion_enable(
8355                                 dev,
8356                                 l2_tunnel);
8357                 else
8358                         ret = ixgbe_dev_l2_tunnel_insertion_disable(
8359                                 dev,
8360                                 l2_tunnel);
8361         }
8362
8363         if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
8364                 if (en)
8365                         ret = ixgbe_dev_l2_tunnel_stripping_enable(
8366                                 dev,
8367                                 l2_tunnel->l2_tunnel_type);
8368                 else
8369                         ret = ixgbe_dev_l2_tunnel_stripping_disable(
8370                                 dev,
8371                                 l2_tunnel->l2_tunnel_type);
8372         }
8373
8374         if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
8375                 if (en)
8376                         ret = ixgbe_dev_l2_tunnel_forwarding_enable(
8377                                 dev,
8378                                 l2_tunnel->l2_tunnel_type);
8379                 else
8380                         ret = ixgbe_dev_l2_tunnel_forwarding_disable(
8381                                 dev,
8382                                 l2_tunnel->l2_tunnel_type);
8383         }
8384
8385         return ret;
8386 }
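/*
 * Illustrative sketch, not part of the driver: this callback is normally
 * invoked through the ethdev API, e.g. to enable E-tag support on a port:
 *
 *   struct rte_eth_l2_tunnel_conf conf = {
 *           .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *   };
 *   rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
 *                                     ETH_L2_TUNNEL_ENABLE_MASK, 1);
 */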
8387
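/* Write the (single) VxLAN UDP port into the VXLANCTRL register. */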
8388 static int
8389 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
8390                         uint16_t port)
8391 {
8392         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
8393         IXGBE_WRITE_FLUSH(hw);
8394
8395         return 0;
8396 }
8397
8398 /* There is only one register for the VxLAN UDP port, so several
8399  * ports cannot be added; adding a port overwrites the current value.
8400  */
8401 static int
8402 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
8403                      uint16_t port)
8404 {
8405         if (port == 0) {
8406                 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
8407                 return -EINVAL;
8408         }
8409
8410         return ixgbe_update_vxlan_port(hw, port);
8411 }
8412
8413 /* The VxLAN port cannot really be deleted: the VxLAN UDP port
8414  * register must always hold a value.
8415  * So, deleting resets it to the original value, 0.
8416  */
8417 static int
8418 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
8419                      uint16_t port)
8420 {
8421         uint16_t cur_port;
8422
8423         cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
8424
8425         if (cur_port != port) {
8426                 PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
8427                 return -EINVAL;
8428         }
8429
8430         return ixgbe_update_vxlan_port(hw, 0);
8431 }
8432
8433 /* Add UDP tunneling port */
8434 static int
8435 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8436                               struct rte_eth_udp_tunnel *udp_tunnel)
8437 {
8438         int ret = 0;
8439         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8440
8441         if (hw->mac.type != ixgbe_mac_X550 &&
8442             hw->mac.type != ixgbe_mac_X550EM_x &&
8443             hw->mac.type != ixgbe_mac_X550EM_a) {
8444                 return -ENOTSUP;
8445         }
8446
8447         if (udp_tunnel == NULL)
8448                 return -EINVAL;
8449
8450         switch (udp_tunnel->prot_type) {
8451         case RTE_TUNNEL_TYPE_VXLAN:
8452                 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
8453                 break;
8454
8455         case RTE_TUNNEL_TYPE_GENEVE:
8456         case RTE_TUNNEL_TYPE_TEREDO:
8457                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8458                 ret = -EINVAL;
8459                 break;
8460
8461         default:
8462                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8463                 ret = -EINVAL;
8464                 break;
8465         }
8466
8467         return ret;
8468 }
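/*
 * Illustrative sketch, not part of the driver: the tunnel-port callbacks
 * are reached through the generic ethdev calls; 4789 is the IANA VxLAN
 * port and only an assumed example value.
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */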
8469
8470 /* Remove UDP tunneling port */
8471 static int
8472 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8473                               struct rte_eth_udp_tunnel *udp_tunnel)
8474 {
8475         int ret = 0;
8476         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8477
8478         if (hw->mac.type != ixgbe_mac_X550 &&
8479             hw->mac.type != ixgbe_mac_X550EM_x &&
8480             hw->mac.type != ixgbe_mac_X550EM_a) {
8481                 return -ENOTSUP;
8482         }
8483
8484         if (udp_tunnel == NULL)
8485                 return -EINVAL;
8486
8487         switch (udp_tunnel->prot_type) {
8488         case RTE_TUNNEL_TYPE_VXLAN:
8489                 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
8490                 break;
8491         case RTE_TUNNEL_TYPE_GENEVE:
8492         case RTE_TUNNEL_TYPE_TEREDO:
8493                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8494                 ret = -EINVAL;
8495                 break;
8496         default:
8497                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8498                 ret = -EINVAL;
8499                 break;
8500         }
8501
8502         return ret;
8503 }
8504
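/* The four callbacks below switch the VF receive mode by asking the PF,
 * over the mailbox, to update the xcast mode for this VF.
 */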
8505 static int
8506 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
8507 {
8508         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8509         int ret;
8510
8511         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) {
8512         case IXGBE_SUCCESS:
8513                 ret = 0;
8514                 break;
8515         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8516                 ret = -ENOTSUP;
8517                 break;
8518         default:
8519                 ret = -EAGAIN;
8520                 break;
8521         }
8522
8523         return ret;
8524 }
8525
8526 static int
8527 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
8528 {
8529         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8530         int ret;
8531
8532         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) {
8533         case IXGBE_SUCCESS:
8534                 ret = 0;
8535                 break;
8536         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8537                 ret = -ENOTSUP;
8538                 break;
8539         default:
8540                 ret = -EAGAIN;
8541                 break;
8542         }
8543
8544         return ret;
8545 }
8546
8547 static int
8548 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8549 {
8550         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8551         int ret;
8552         int mode = IXGBEVF_XCAST_MODE_ALLMULTI;
8553
8554         switch (hw->mac.ops.update_xcast_mode(hw, mode)) {
8555         case IXGBE_SUCCESS:
8556                 ret = 0;
8557                 break;
8558         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8559                 ret = -ENOTSUP;
8560                 break;
8561         default:
8562                 ret = -EAGAIN;
8563                 break;
8564         }
8565
8566         return ret;
8567 }
8568
8569 static int
8570 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8571 {
8572         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8573         int ret;
8574
8575         switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) {
8576         case IXGBE_SUCCESS:
8577                 ret = 0;
8578                 break;
8579         case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
8580                 ret = -ENOTSUP;
8581                 break;
8582         default:
8583                 ret = -EAGAIN;
8584                 break;
8585         }
8586
8587         return ret;
8588 }
8589
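/* Process a pending PF-to-VF mailbox message; a PF control message
 * signals a PF-initiated reset and is forwarded to the application
 * as an RTE_ETH_EVENT_INTR_RESET event.
 */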
8590 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8591 {
8592         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8593         u32 in_msg = 0;
8594
8595         /* peek the message first */
8596         in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
8597
8598         /* PF reset VF event */
8599         if (in_msg == IXGBE_PF_CONTROL_MSG) {
8600                 /* dummy mbx read to ack pf */
8601                 if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8602                         return;
8603                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8604                                               NULL);
8605         }
8606 }
8607
8608 static int
8609 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8610 {
8611         uint32_t eicr;
8612         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8613         struct ixgbe_interrupt *intr =
8614                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8615         ixgbevf_intr_disable(dev);
8616
8617         /* read-on-clear nic registers here */
8618         eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8619         intr->flags = 0;
8620
8621         /* only one misc vector supported - mailbox */
8622         eicr &= IXGBE_VTEICR_MASK;
8623         if (eicr == IXGBE_MISC_VEC_ID)
8624                 intr->flags |= IXGBE_FLAG_MAILBOX;
8625
8626         return 0;
8627 }
8628
8629 static int
8630 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8631 {
8632         struct ixgbe_interrupt *intr =
8633                 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8634
8635         if (intr->flags & IXGBE_FLAG_MAILBOX) {
8636                 ixgbevf_mbx_process(dev);
8637                 intr->flags &= ~IXGBE_FLAG_MAILBOX;
8638         }
8639
8640         ixgbevf_intr_enable(dev);
8641
8642         return 0;
8643 }
8644
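/* VF interrupt handler: read and clear the interrupt cause, then act
 * on it (currently only the mailbox vector) and re-enable interrupts.
 */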
8645 static void
8646 ixgbevf_dev_interrupt_handler(void *param)
8647 {
8648         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8649
8650         ixgbevf_dev_interrupt_get_status(dev);
8651         ixgbevf_dev_interrupt_action(dev);
8652 }
8653
8654 /**
8655  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8656  *  @hw: pointer to hardware structure
8657  *
8658  *  Stops the transmit data path and waits for the HW to internally empty
8659  *  the Tx security block
8660  **/
8661 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8662 {
8663 #define IXGBE_MAX_SECTX_POLL 40
8664
8665         int i;
8666         int sectxreg;
8667
8668         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8669         sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8670         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8671         for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8672                 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8673                 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8674                         break;
8675                 /* Use interrupt-safe sleep just in case */
8676                 usec_delay(1000);
8677         }
8678
8679         /* For informational purposes only */
8680         if (i >= IXGBE_MAX_SECTX_POLL)
8681                 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
8682                          "path fully disabled.  Continuing with init.");
8683
8684         return IXGBE_SUCCESS;
8685 }
8686
8687 /**
8688  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8689  *  @hw: pointer to hardware structure
8690  *
8691  *  Enables the transmit data path.
8692  **/
8693 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8694 {
8695         uint32_t sectxreg;
8696
8697         sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8698         sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8699         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8700         IXGBE_WRITE_FLUSH(hw);
8701
8702         return IXGBE_SUCCESS;
8703 }
8704
8705 /* restore n-tuple filter */
8706 static inline void
8707 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8708 {
8709         struct ixgbe_filter_info *filter_info =
8710                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8711         struct ixgbe_5tuple_filter *node;
8712
8713         TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8714                 ixgbe_inject_5tuple_filter(dev, node);
8715         }
8716 }
8717
8718 /* restore ethernet type filter */
8719 static inline void
8720 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8721 {
8722         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8723         struct ixgbe_filter_info *filter_info =
8724                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8725         int i;
8726
8727         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8728                 if (filter_info->ethertype_mask & (1 << i)) {
8729                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8730                                         filter_info->ethertype_filters[i].etqf);
8731                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8732                                         filter_info->ethertype_filters[i].etqs);
8733                         IXGBE_WRITE_FLUSH(hw);
8734                 }
8735         }
8736 }
8737
8738 /* restore SYN filter */
8739 static inline void
8740 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8741 {
8742         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8743         struct ixgbe_filter_info *filter_info =
8744                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8745         uint32_t synqf;
8746
8747         synqf = filter_info->syn_info;
8748
8749         if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8750                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8751                 IXGBE_WRITE_FLUSH(hw);
8752         }
8753 }
8754
8755 /* restore L2 tunnel filter */
8756 static inline void
8757 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8758 {
8759         struct ixgbe_l2_tn_info *l2_tn_info =
8760                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8761         struct ixgbe_l2_tn_filter *node;
8762         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8763
8764         TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8765                 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8766                 l2_tn_conf.tunnel_id      = node->key.tn_id;
8767                 l2_tn_conf.pool           = node->pool;
8768                 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8769         }
8770 }
8771
8772 /* restore rss filter */
8773 static inline void
8774 ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
8775 {
8776         struct ixgbe_filter_info *filter_info =
8777                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8778
8779         if (filter_info->rss_info.conf.queue_num)
8780                 ixgbe_config_rss_filter(dev,
8781                         &filter_info->rss_info, TRUE);
8782 }
8783
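/* Re-program every software-cached filter into hardware, e.g. after
 * a device stop/start cycle.
 */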
8784 static int
8785 ixgbe_filter_restore(struct rte_eth_dev *dev)
8786 {
8787         ixgbe_ntuple_filter_restore(dev);
8788         ixgbe_ethertype_filter_restore(dev);
8789         ixgbe_syn_filter_restore(dev);
8790         ixgbe_fdir_filter_restore(dev);
8791         ixgbe_l2_tn_filter_restore(dev);
8792         ixgbe_rss_filter_restore(dev);
8793
8794         return 0;
8795 }
8796
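/* Re-apply the saved E-tag configuration (enable state, forwarding
 * and ether type) to hardware.
 */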
8797 static void
8798 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
8799 {
8800         struct ixgbe_l2_tn_info *l2_tn_info =
8801                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8802         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8803
8804         if (l2_tn_info->e_tag_en)
8805                 (void)ixgbe_e_tag_enable(hw);
8806
8807         if (l2_tn_info->e_tag_fwd_en)
8808                 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
8809
8810         (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
8811 }
8812
8813 /* remove all the n-tuple filters */
8814 void
8815 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
8816 {
8817         struct ixgbe_filter_info *filter_info =
8818                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8819         struct ixgbe_5tuple_filter *p_5tuple;
8820
8821         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
8822                 ixgbe_remove_5tuple_filter(dev, p_5tuple);
8823 }
8824
8825 /* remove all the ether type filters */
8826 void
8827 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
8828 {
8829         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8830         struct ixgbe_filter_info *filter_info =
8831                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8832         int i;
8833
8834         for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8835                 if (filter_info->ethertype_mask & (1 << i) &&
8836                     !filter_info->ethertype_filters[i].conf) {
8837                         (void)ixgbe_ethertype_filter_remove(filter_info,
8838                                                             (uint8_t)i);
8839                         IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
8840                         IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
8841                         IXGBE_WRITE_FLUSH(hw);
8842                 }
8843         }
8844 }
8845
8846 /* remove the SYN filter */
8847 void
8848 ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
8849 {
8850         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8851         struct ixgbe_filter_info *filter_info =
8852                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8853
8854         if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
8855                 filter_info->syn_info = 0;
8856
8857                 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
8858                 IXGBE_WRITE_FLUSH(hw);
8859         }
8860 }
8861
8862 /* remove all the L2 tunnel filters */
8863 int
8864 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
8865 {
8866         struct ixgbe_l2_tn_info *l2_tn_info =
8867                 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8868         struct ixgbe_l2_tn_filter *l2_tn_filter;
8869         struct rte_eth_l2_tunnel_conf l2_tn_conf;
8870         int ret = 0;
8871
8872         while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8873                 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8874                 l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
8875                 l2_tn_conf.pool           = l2_tn_filter->pool;
8876                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8877                 if (ret < 0)
8878                         return ret;
8879         }
8880
8881         return 0;
8882 }
8883
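/* Cache the MACsec settings in device private data so they can later
 * be re-applied to hardware.
 */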
8884 void
8885 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
8886                                 struct ixgbe_macsec_setting *macsec_setting)
8887 {
8888         struct ixgbe_macsec_setting *macsec =
8889                 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8890
8891         macsec->offload_en = macsec_setting->offload_en;
8892         macsec->encrypt_en = macsec_setting->encrypt_en;
8893         macsec->replayprotect_en = macsec_setting->replayprotect_en;
8894 }
8895
8896 void
8897 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
8898 {
8899         struct ixgbe_macsec_setting *macsec =
8900                 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
8901
8902         macsec->offload_en = 0;
8903         macsec->encrypt_en = 0;
8904         macsec->replayprotect_en = 0;
8905 }
8906
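/* Apply the given MACsec settings to hardware: quiesce the security
 * Tx path, program the crypto engines and SA lookup, then restart the
 * Rx and Tx security paths.
 */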
8907 void
8908 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
8909                                 struct ixgbe_macsec_setting *macsec_setting)
8910 {
8911         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8912         uint32_t ctrl;
8913         uint8_t en = macsec_setting->encrypt_en;
8914         uint8_t rp = macsec_setting->replayprotect_en;
8915
8916         /**
8917          * Workaround:
8918          * No Tx equivalent of ixgbe_disable_sec_rx_path is
8919          * implemented in the base code, and the base code
8920          * must not be modified in DPDK, so just call the
8921          * hand-written one directly for now.
8922          * The hardware support has been checked by
8923          * ixgbe_disable_sec_rx_path().
8924          */
8925         ixgbe_disable_sec_tx_path_generic(hw);
8926
8927         /* Enable Ethernet CRC (required by MACsec offload) */
8928         ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
8929         ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
8930         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
8931
8932         /* Enable the TX and RX crypto engines */
8933         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8934         ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
8935         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
8936
8937         ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
8938         ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
8939         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
8940
8941         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
8942         ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
8943         ctrl |= 0x3;
8944         IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
8945
8946         /* Enable SA lookup */
8947         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
8948         ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
8949         ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
8950                      IXGBE_LSECTXCTRL_AUTH;
8951         ctrl |= IXGBE_LSECTXCTRL_AISCI;
8952         ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
8953         ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
8954         IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
8955
8956         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
8957         ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
8958         ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
8959         ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
8960         if (rp)
8961                 ctrl |= IXGBE_LSECRXCTRL_RP;
8962         else
8963                 ctrl &= ~IXGBE_LSECRXCTRL_RP;
8964         IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
8965
8966         /* Start the data paths */
8967         ixgbe_enable_sec_rx_path(hw);
8968         /**
8969          * Workaround:
8970          * No Tx equivalent of ixgbe_enable_sec_rx_path is
8971          * implemented in the base code, and the base code
8972          * must not be modified in DPDK, so just call the
8973          * hand-written one directly for now.
8974          */
8975         ixgbe_enable_sec_tx_path_generic(hw);
8976 }
8977
8978 void
8979 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
8980 {
8981         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8982         uint32_t ctrl;
8983
8984         /**
8985          * Workaround:
8986          * No Tx equivalent of ixgbe_disable_sec_rx_path is
8987          * implemented in the base code, and the base code
8988          * must not be modified in DPDK, so just call the
8989          * hand-written one directly for now.
8990          * The hardware support has been checked by
8991          * ixgbe_disable_sec_rx_path().
8992          */
8993         ixgbe_disable_sec_tx_path_generic(hw);
8994
8995         /* Disable the TX and RX crypto engines */
8996         ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8997         ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
8998         IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
8999
9000         ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
9001         ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
9002         IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
9003
9004         /* Disable SA lookup */
9005         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
9006         ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
9007         ctrl |= IXGBE_LSECTXCTRL_DISABLE;
9008         IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
9009
9010         ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
9011         ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
9012         ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
9013         IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
9014
9015         /* Start the data paths */
9016         ixgbe_enable_sec_rx_path(hw);
9017         /**
9018          * Workaround:
9019          * No Tx equivalent of ixgbe_enable_sec_rx_path is
9020          * implemented in the base code, and the base code
9021          * must not be modified in DPDK, so just call the
9022          * hand-written one directly for now.
9023          */
9024         ixgbe_enable_sec_tx_path_generic(hw);
9025 }
9026
9027 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
9028 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
9029 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
9030 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
9031 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
9032 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
9033 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
9034                               IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
9035
9036 RTE_INIT(ixgbe_init_log)
9037 {
9038         ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
9039         if (ixgbe_logtype_init >= 0)
9040                 rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
9041         ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
9042         if (ixgbe_logtype_driver >= 0)
9043                 rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
9044 #ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
9045         ixgbe_logtype_rx = rte_log_register("pmd.net.ixgbe.rx");
9046         if (ixgbe_logtype_rx >= 0)
9047                 rte_log_set_level(ixgbe_logtype_rx, RTE_LOG_DEBUG);
9048 #endif
9049
9050 #ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
9051         ixgbe_logtype_tx = rte_log_register("pmd.net.ixgbe.tx");
9052         if (ixgbe_logtype_tx >= 0)
9053                 rte_log_set_level(ixgbe_logtype_tx, RTE_LOG_DEBUG);
9054 #endif
9055
9056 #ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
9057         ixgbe_logtype_tx_free = rte_log_register("pmd.net.ixgbe.tx_free");
9058         if (ixgbe_logtype_tx_free >= 0)
9059                 rte_log_set_level(ixgbe_logtype_tx_free, RTE_LOG_DEBUG);
9060 #endif
9061 }