dpdk.git: drivers/net/i40e/i40e_ethdev.c (commit 825de51fbb8f165ed6ed0e7a98b8f249345bfd2b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_bitmap.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec"
#define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"
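
/*
 * Editor's note (illustration, not driver code): these keys are device
 * arguments supplied per PCI device on the EAL command line and parsed
 * below against valid_keys[] via rte_kvargs. A minimal sketch, assuming
 * a hypothetical BDF of 0000:02:00.0:
 *
 *   dpdk-testpmd -w 0000:02:00.0,support-multi-driver=1,enable_floating_veb=1 -- -i
 */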

#define I40E_CLEAR_PXE_WAIT_MS     200
#define I40E_VSI_TSR_QINQ_STRIP         0x4010
#define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
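
/*
 * Editor's note: 0xF2000 bytes is 991232 bytes; shifting right by
 * I40E_KILOSHIFT (10) gives 968 KB, i.e. the whole Rx packet buffer
 * (I40E_RXPBSIZE = 968 * 1024 bytes) expressed in kilobytes, so both
 * watermarks default to the full buffer size.
 */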

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
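
/*
 * Editor's note: the INCVAL constants scale inversely with link speed
 * (the timesync clock ticks more slowly at lower speeds): modulo
 * rounding, the 10GB value is twice the 40GB value, and the 1GB value
 * is ten times the 10GB value.
 */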

/**
 * Below are values for writing to un-exposed registers, as suggested
 * by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
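
/*
 * Editor's sketch (illustration only, assuming the standard
 * rte_pci_read_config()/rte_pci_write_config() helpers): the offsets
 * above are typically used like this to turn Extended Tag on when the
 * capability bit is present:
 *
 *   uint32_t buf = 0;
 *
 *   if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
 *                           PCI_DEV_CAP_REG) >= 0 &&
 *       (buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
 *           rte_pci_read_config(pci_dev, &buf, sizeof(buf),
 *                               PCI_DEV_CTRL_REG);
 *           buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *           rte_pci_write_config(pci_dev, &buf, sizeof(buf),
 *                                PCI_DEV_CTRL_REG);
 *   }
 */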

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static int i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
                                char *fw_version, size_t fw_size);
static int i40e_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mac_addr,
                            uint32_t index,
                            uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
                                                     uint16_t seid,
                                                     uint16_t rule_type,
                                                     uint16_t *entries,
                                                     uint16_t count,
                                                     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
                                struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
                                  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
        struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
        struct i40e_aqc_cloud_filters_element_bb *cld_filter,
        struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
static int i40e_pf_config_rss(struct i40e_pf *pf);

static const char *const valid_keys[] = {
        ETH_I40E_FLOATING_VEB_ARG,
        ETH_I40E_FLOATING_VEB_LIST_ARG,
        ETH_I40E_SUPPORT_MULTI_DRIVER,
        ETH_I40E_QUEUE_NUM_PER_VF_ARG,
        ETH_I40E_USE_LATEST_VEC,
        ETH_I40E_VF_MSG_CFG,
        NULL};

static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .dev_reset                    = i40e_dev_reset,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .fw_version_get               = i40e_fw_version_get,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .rx_burst_mode_get            = i40e_rx_burst_mode_get,
        .tx_burst_mode_get            = i40e_tx_burst_mode_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .get_module_info              = i40e_get_module_info,
        .get_module_eeprom            = i40e_get_module_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
        .tm_ops_get                   = i40e_tm_ops_get,
        .tx_done_cleanup              = i40e_tx_done_cleanup,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))
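
/*
 * Editor's sketch (illustration, not driver code): tables like the one
 * above are consumed generically, fetching each counter by applying the
 * stored offset to the stats structure. Assuming a populated
 * struct i40e_eth_stats eth_stats:
 *
 *   unsigned int i;
 *
 *   for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
 *           uint64_t val = *(uint64_t *)(((char *)&eth_stats) +
 *                           rte_i40e_stats_strings[i].offset);
 *           printf("%s: %" PRIu64 "\n",
 *                  rte_i40e_stats_strings[i].name, val);
 *   }
 */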

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
        int i, retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                sizeof(struct i40e_adapter),
                eth_dev_pci_specific_init, pci_dev,
                eth_i40e_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        /* probe VF representor ports */
        struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
                pci_dev->device.name);

        if (pf_ethdev == NULL)
                return -ENODEV;

        for (i = 0; i < eth_da.nb_representor_ports; i++) {
                struct i40e_vf_representor representor = {
                        .vf_id = eth_da.representor_ports[i],
                        .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
                                pf_ethdev->data->dev_private)->switch_domain_id,
                        .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
                                pf_ethdev->data->dev_private)
                };

                /* representor port name: net_<pci bdf>_representor_<vf id> */
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                        pci_dev->device.name, eth_da.representor_ports[i]);

                retval = rte_eth_dev_create(&pci_dev->device, name,
                        sizeof(struct i40e_vf_representor), NULL, NULL,
                        i40e_vf_representor_init, &representor);

                if (retval)
                        PMD_DRV_LOG(ERR, "failed to create i40e vf "
                                "representor %s.", name);
        }

        return 0;
}
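
/*
 * Editor's note (illustration): the representor loop above only runs when
 * the device is probed with a representor devargs list, e.g.
 *
 *   -w 0000:02:00.0,representor=[0-3]
 *
 * which creates one ethdev per listed VF, named as built by the
 * snprintf() above: net_0000:02:00.0_representor_<vf id>.
 */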

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return 0;

        if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                        i40e_vf_representor_uninit);
        else
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                                eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
        .id_table = pci_id_i40e_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_i40e_pci_probe,
        .remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
                         uint32_t reg_val)
{
        uint32_t ori_reg_val;
        struct rte_eth_dev *dev;

        ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
        dev = ((struct i40e_adapter *)hw->back)->eth_dev;
        i40e_write_rx_ctl(hw, reg_addr, reg_val);
        if (ori_reg_val != reg_val)
                PMD_DRV_LOG(WARNING,
                            "i40e device %s changed global register [0x%08x]."
                            " original: 0x%08x, new: 0x%08x",
                            dev->device->name, reg_addr, ori_reg_val, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
        /*
         * Initialize registers for parsing the packet type of QinQ packets.
         * This should be removed once a proper configuration API is
         * available, to avoid configuration conflicts between ports of
         * the same device.
         */
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t val;

        /* The INTENA flag is not auto-cleared when an interrupt fires */
        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
        val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
                I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

        /* If multi-driver support is enabled, the PF will use INT0. */
        if (!pf->support_multi_driver)
                val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR,
                        "Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
                          const char *floating_veb_value,
                          void *opaque)
{
        int idx = 0;
        unsigned int count = 0;
        char *end = NULL;
        int min, max;
        bool *vf_floating_veb = opaque;

        while (isblank(*floating_veb_value))
                floating_veb_value++;

        /* Reset floating VEB configuration for VFs */
        for (idx = 0; idx < I40E_MAX_VF; idx++)
                vf_floating_veb[idx] = false;

        min = I40E_MAX_VF;
        do {
                while (isblank(*floating_veb_value))
                        floating_veb_value++;
                if (*floating_veb_value == '\0')
                        return -1;
                errno = 0;
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        min = idx;
                } else if ((*end == ';') || (*end == '\0')) {
                        max = idx;
                        if (min == I40E_MAX_VF)
                                min = idx;
                        if (max >= I40E_MAX_VF)
                                max = I40E_MAX_VF - 1;
                        for (idx = min; idx <= max; idx++) {
                                vf_floating_veb[idx] = true;
                                count++;
                        }
                        min = I40E_MAX_VF;
                } else {
                        return -1;
                }
                floating_veb_value = end + 1;
        } while (*end != '\0');

        if (count == 0)
                return -1;

        return 0;
}
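
/*
 * Editor's note (illustration): the handler above accepts semicolon-
 * separated VF indices and inclusive ranges; e.g.
 * floating_veb_list=0;3-5;7 marks VFs 0, 3, 4, 5 and 7 to be attached
 * to the floating VEB.
 */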

static void
config_vf_floating_veb(struct rte_devargs *devargs,
                       uint16_t floating_veb,
                       bool *vf_floating_veb)
{
        struct rte_kvargs *kvlist;
        int i;
        const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

        if (!floating_veb)
                return;
        /* All the VFs attach to the floating VEB by default
         * when the floating VEB is enabled.
         */
        for (i = 0; i < I40E_MAX_VF; i++)
                vf_floating_veb[i] = true;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, valid_keys);
        if (kvlist == NULL)
                return;

        if (!rte_kvargs_count(kvlist, floating_veb_list)) {
                rte_kvargs_free(kvlist);
                return;
        }
        /* When the floating_veb_list parameter is present, all the VFs
         * first attach to the legacy VEB, and are then moved to the
         * floating VEB according to floating_veb_list.
         */
        if (rte_kvargs_process(kvlist, floating_veb_list,
                               floating_veb_list_handler,
                               vf_floating_veb) < 0) {
                rte_kvargs_free(kvlist);
                return;
        }
        rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
                            const char *value,
                            __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, valid_keys);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, floating_veb_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* Floating VEB is enabled only when the key-value pair
         * enable_floating_veb=1 is present.
         */
        if (rte_kvargs_process(kvlist, floating_veb_key,
                               i40e_check_floating_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

        if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
                pf->floating_veb =
                        is_floating_veb_supported(pci_dev->device.devargs);
                config_vf_floating_veb(pci_dev->device.devargs,
                                       pf->floating_veb,
                                       pf->floating_veb_list);
        } else {
                pf->floating_veb = false;
        }
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        char ethertype_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters ethertype_hash_params = {
                .name = ethertype_hash_name,
                .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize ethertype filter rule list and hash */
        TAILQ_INIT(&ethertype_rule->ethertype_list);
        snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
                 "ethertype_%s", dev->device->name);
        ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
        if (!ethertype_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
                return -EINVAL;
        }
        ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
                                       sizeof(struct i40e_ethertype_filter *) *
                                       I40E_MAX_ETHERTYPE_FILTER_NUM,
                                       0);
        if (!ethertype_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for ethertype hash map!");
                ret = -ENOMEM;
                goto err_ethertype_hash_map_alloc;
        }

        return 0;

err_ethertype_hash_map_alloc:
        rte_hash_free(ethertype_rule->hash_table);

        return ret;
}
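
/*
 * Editor's sketch (illustration, not driver code): the hash table plus
 * hash_map pair built above is used as an index: rte_hash_add_key()
 * returns a slot position, and hash_map[] stores the filter pointer at
 * that position. Assuming a populated filter 'f':
 *
 *   int pos = rte_hash_add_key(ethertype_rule->hash_table, &f->input);
 *
 *   if (pos >= 0)
 *           ethertype_rule->hash_map[pos] = f;   // look it up later via
 *                                                // rte_hash_lookup()
 */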

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        char tunnel_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters tunnel_hash_params = {
                .name = tunnel_hash_name,
                .entries = I40E_MAX_TUNNEL_FILTER_NUM,
                .key_len = sizeof(struct i40e_tunnel_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize tunnel filter rule list and hash */
        TAILQ_INIT(&tunnel_rule->tunnel_list);
        snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
                 "tunnel_%s", dev->device->name);
        tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
        if (!tunnel_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
                return -EINVAL;
        }
        tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
                                    sizeof(struct i40e_tunnel_filter *) *
                                    I40E_MAX_TUNNEL_FILTER_NUM,
                                    0);
        if (!tunnel_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for tunnel hash map!");
                ret = -ENOMEM;
                goto err_tunnel_hash_map_alloc;
        }

        return 0;

err_tunnel_hash_map_alloc:
        rte_hash_free(tunnel_rule->hash_table);

        return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
        uint32_t best = hw->func_caps.fd_filters_best_effort;
        struct rte_bitmap *bmp = NULL;
        uint32_t bmp_size;
        void *mem = NULL;
        uint32_t i = 0;
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = I40E_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct i40e_fdir_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize flow director filter rule list and hash */
        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }

        fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
                                          sizeof(struct i40e_fdir_filter *) *
                                          I40E_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }

        fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
                        sizeof(struct i40e_fdir_filter) *
                        I40E_MAX_FDIR_FILTER_NUM,
                        0);

        if (!fdir_info->fdir_filter_array) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir filter array!");
                ret = -ENOMEM;
                goto err_fdir_filter_array_alloc;
        }

        fdir_info->fdir_space_size = alloc + best;
        fdir_info->fdir_actual_cnt = 0;
        fdir_info->fdir_guarantee_total_space = alloc;
        fdir_info->fdir_guarantee_free_space =
                fdir_info->fdir_guarantee_total_space;

        PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);

        fdir_info->fdir_flow_pool.pool =
                        rte_zmalloc("i40e_fdir_entry",
                                sizeof(struct i40e_fdir_entry) *
                                fdir_info->fdir_space_size,
                                0);

        if (!fdir_info->fdir_flow_pool.pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for bitmap flow!");
                ret = -ENOMEM;
                goto err_fdir_bitmap_flow_alloc;
        }

        for (i = 0; i < fdir_info->fdir_space_size; i++)
                fdir_info->fdir_flow_pool.pool[i].idx = i;

        bmp_size =
                rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
        mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
        if (mem == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir bitmap!");
                ret = -ENOMEM;
                goto err_fdir_mem_alloc;
        }
        bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
        if (bmp == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to initialize fdir bitmap!");
                ret = -ENOMEM;
                goto err_fdir_bmp_alloc;
        }
        for (i = 0; i < fdir_info->fdir_space_size; i++)
                rte_bitmap_set(bmp, i);

        fdir_info->fdir_flow_pool.bitmap = bmp;

        return 0;

err_fdir_bmp_alloc:
        rte_free(mem);
err_fdir_mem_alloc:
        rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
        rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
        rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}
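
/*
 * Editor's sketch (illustration, not driver code): the bitmap built above
 * acts as a free-slot allocator for the pre-allocated flow pool; a set
 * bit means "free". Assuming the usual rte_bitmap scan pattern:
 *
 *   uint32_t pos = 0;
 *   uint64_t slab = 0;
 *
 *   if (rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos, &slab)) {
 *           uint32_t idx = pos + __builtin_ctzll(slab);
 *
 *           rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, idx);
 *           // fdir_info->fdir_flow_pool.pool[idx] is now in use
 *   }
 */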

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
        int i;

        /* Initialize customized pctype */
        for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
                pf->customized_pctype[i].index = i;
                pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
                pf->customized_pctype[i].valid = false;
        }

        pf->gtp_support = false;
        pf->esp_support = false;
}

static void
i40e_init_filter_invalidation(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        uint32_t glqf_ctl_reg = 0;

        glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
        if (!pf->support_multi_driver) {
                fdir_info->fdir_invalprio = 1;
                glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
                PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
                i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
        } else {
                if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
                        fdir_info->fdir_invalprio = 1;
                        PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
                } else {
                        fdir_info->fdir_invalprio = 0;
                        PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
                }
        }
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_queue_regions *info = &pf->queue_region;
        uint16_t i;

        for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
                i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

        memset(info, 0, sizeof(struct i40e_queue_regions));
}

1215 static int
1216 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1217                                const char *value,
1218                                void *opaque)
1219 {
1220         struct i40e_pf *pf;
1221         unsigned long support_multi_driver;
1222         char *end;
1223
1224         pf = (struct i40e_pf *)opaque;
1225
1226         errno = 0;
1227         support_multi_driver = strtoul(value, &end, 10);
1228         if (errno != 0 || end == value || *end != 0) {
1229                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1230                 return -(EINVAL);
1231         }
1232
1233         if (support_multi_driver == 1 || support_multi_driver == 0)
1234                 pf->support_multi_driver = (bool)support_multi_driver;
1235         else
                PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
                            "enable global configuration by default.",
                            ETH_I40E_SUPPORT_MULTI_DRIVER);
1239         return 0;
1240 }
1241
1242 static int
1243 i40e_support_multi_driver(struct rte_eth_dev *dev)
1244 {
1245         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1246         struct rte_kvargs *kvlist;
1247         int kvargs_count;
1248
1249         /* Enable global configuration by default */
1250         pf->support_multi_driver = false;
1251
1252         if (!dev->device->devargs)
1253                 return 0;
1254
1255         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1256         if (!kvlist)
1257                 return -EINVAL;
1258
1259         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1260         if (!kvargs_count) {
1261                 rte_kvargs_free(kvlist);
1262                 return 0;
1263         }
1264
1265         if (kvargs_count > 1)
                PMD_DRV_LOG(WARNING, "More than one argument \"%s\" given; "
                            "only the first invalid or last valid one is used!",
1268                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1269
1270         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1271                                i40e_parse_multi_drv_handler, pf) < 0) {
1272                 rte_kvargs_free(kvlist);
1273                 return -EINVAL;
1274         }
1275
1276         rte_kvargs_free(kvlist);
1277         return 0;
1278 }
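
/*
 * Usage note (illustrative; the PCI address is hypothetical): the option
 * parsed above is passed as a device argument, e.g.
 *
 *     dpdk-testpmd -w 0000:02:00.0,support-multi-driver=1 -- -i
 *
 * With the option enabled, the PMD avoids rewriting global registers that
 * may be shared with other driver instances bound to ports of the same NIC.
 */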
1279
1280 static int
1281 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1282                                     uint32_t reg_addr, uint64_t reg_val,
1283                                     struct i40e_asq_cmd_details *cmd_details)
1284 {
1285         uint64_t ori_reg_val;
1286         struct rte_eth_dev *dev;
1287         int ret;
1288
1289         ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1290         if (ret != I40E_SUCCESS) {
1291                 PMD_DRV_LOG(ERR,
                            "Failed to do a debug read from 0x%08x",
1293                             reg_addr);
1294                 return -EIO;
1295         }
1296         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1297
1298         if (ori_reg_val != reg_val)
1299                 PMD_DRV_LOG(WARNING,
1300                             "i40e device %s changed global register [0x%08x]."
1301                             " original: 0x%"PRIx64", after: 0x%"PRIx64,
1302                             dev->device->name, reg_addr, ori_reg_val, reg_val);
1303
1304         return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1305 }
1306
1307 static int
1308 i40e_parse_latest_vec_handler(__rte_unused const char *key,
1309                                 const char *value,
1310                                 void *opaque)
1311 {
1312         struct i40e_adapter *ad = opaque;
1313         int use_latest_vec;
1314
1315         use_latest_vec = atoi(value);
1316
        if (use_latest_vec != 0 && use_latest_vec != 1) {
                PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
                /* Clamp the value so the behaviour matches the warning. */
                use_latest_vec = 1;
        }

        ad->use_latest_vec = (uint8_t)use_latest_vec;
1321
1322         return 0;
1323 }
1324
1325 static int
1326 i40e_use_latest_vec(struct rte_eth_dev *dev)
1327 {
1328         struct i40e_adapter *ad =
1329                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1330         struct rte_kvargs *kvlist;
1331         int kvargs_count;
1332
1333         ad->use_latest_vec = false;
1334
1335         if (!dev->device->devargs)
1336                 return 0;
1337
1338         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1339         if (!kvlist)
1340                 return -EINVAL;
1341
1342         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
1343         if (!kvargs_count) {
1344                 rte_kvargs_free(kvlist);
1345                 return 0;
1346         }
1347
1348         if (kvargs_count > 1)
                PMD_DRV_LOG(WARNING, "More than one argument \"%s\" given; "
                            "only the first invalid or last valid one is used!",
1351                             ETH_I40E_USE_LATEST_VEC);
1352
1353         if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
1354                                 i40e_parse_latest_vec_handler, ad) < 0) {
1355                 rte_kvargs_free(kvlist);
1356                 return -EINVAL;
1357         }
1358
1359         rte_kvargs_free(kvlist);
1360         return 0;
1361 }
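
/*
 * Usage note (illustrative, same devargs mechanism as above):
 *
 *     dpdk-testpmd -w 0000:02:00.0,use-latest-supported-vec=1 -- -i
 *
 * asks the PMD to pick the newest vector Rx/Tx path the CPU supports
 * instead of the default conservative choice.
 */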
1362
1363 static int
1364 read_vf_msg_config(__rte_unused const char *key,
1365                                const char *value,
1366                                void *opaque)
1367 {
1368         struct i40e_vf_msg_cfg *cfg = opaque;
1369
1370         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1371                         &cfg->ignore_second) != 3) {
1372                 memset(cfg, 0, sizeof(*cfg));
1373                 PMD_DRV_LOG(ERR, "format error! example: "
1374                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1375                 return -EINVAL;
1376         }
1377
        /*
         * If the message validation function has been enabled, both the
         * 'period' and 'ignore_second' values must be greater than 0.
         */
        if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
                memset(cfg, 0, sizeof(*cfg));
                PMD_DRV_LOG(ERR, "%s error! the second and third"
                                " numbers must be greater than 0!",
                                ETH_I40E_VF_MSG_CFG);
1387                 return -EINVAL;
1388         }
1389
1390         return 0;
1391 }
1392
1393 static int
1394 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1395                 struct i40e_vf_msg_cfg *msg_cfg)
1396 {
1397         struct rte_kvargs *kvlist;
1398         int kvargs_count;
1399         int ret = 0;
1400
1401         memset(msg_cfg, 0, sizeof(*msg_cfg));
1402
1403         if (!dev->device->devargs)
1404                 return ret;
1405
1406         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1407         if (!kvlist)
1408                 return -EINVAL;
1409
1410         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1411         if (!kvargs_count)
1412                 goto free_end;
1413
1414         if (kvargs_count > 1) {
1415                 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1416                                 ETH_I40E_VF_MSG_CFG);
1417                 ret = -EINVAL;
1418                 goto free_end;
1419         }
1420
1421         if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1422                         read_vf_msg_config, msg_cfg) < 0)
1423                 ret = -EINVAL;
1424
1425 free_end:
1426         rte_kvargs_free(kvlist);
1427         return ret;
1428 }
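
/*
 * Usage note (illustrative; semantics inferred from the parser above):
 *
 *     dpdk-testpmd -w 0000:02:00.0,vf_msg_cfg=60@120:180 -- -i
 *
 * reads as max_msg@period:ignore_second, i.e. if a VF sends more than 60
 * messages within a 120 second period, its messages are ignored for the
 * next 180 seconds.
 */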
1429
1430 #define I40E_ALARM_INTERVAL 50000 /* us */
1431
1432 static int
1433 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1434 {
1435         struct rte_pci_device *pci_dev;
1436         struct rte_intr_handle *intr_handle;
1437         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1439         struct i40e_vsi *vsi;
1440         int ret;
1441         uint32_t len, val;
1442         uint8_t aq_fail = 0;
1443
1444         PMD_INIT_FUNC_TRACE();
1445
1446         dev->dev_ops = &i40e_eth_dev_ops;
1447         dev->rx_queue_count = i40e_dev_rx_queue_count;
1448         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1449         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1450         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1451         dev->rx_pkt_burst = i40e_recv_pkts;
1452         dev->tx_pkt_burst = i40e_xmit_pkts;
1453         dev->tx_pkt_prepare = i40e_prep_pkts;
1454
        /* For secondary processes, we don't initialise any further as the
         * primary process has already done this work. Only check that we
         * don't need a different RX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1459                 i40e_set_rx_function(dev);
1460                 i40e_set_tx_function(dev);
1461                 return 0;
1462         }
1463         i40e_set_default_ptype_table(dev);
1464         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1465         intr_handle = &pci_dev->intr_handle;
1466
1467         rte_eth_copy_pci_info(dev, pci_dev);
1468
1469         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1470         pf->adapter->eth_dev = dev;
1471         pf->dev_data = dev->data;
1472
1473         hw->back = I40E_PF_TO_ADAPTER(pf);
1474         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1475         if (!hw->hw_addr) {
1476                 PMD_INIT_LOG(ERR,
1477                         "Hardware is not available, as address is NULL");
1478                 return -ENODEV;
1479         }
1480
1481         hw->vendor_id = pci_dev->id.vendor_id;
1482         hw->device_id = pci_dev->id.device_id;
1483         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1484         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1485         hw->bus.device = pci_dev->addr.devid;
1486         hw->bus.func = pci_dev->addr.function;
1487         hw->adapter_stopped = 0;
1488         hw->adapter_closed = 0;
1489
1490         /* Init switch device pointer */
1491         hw->switch_dev = NULL;
1492
1493         /*
1494          * Switch Tag value should not be identical to either the First Tag
1495          * or Second Tag values. So set something other than common Ethertype
1496          * for internal switching.
1497          */
1498         hw->switch_tag = 0xffff;
1499
1500         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1501         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1502                 PMD_INIT_LOG(ERR, "\nERROR: "
1503                         "Firmware recovery mode detected. Limiting functionality.\n"
1504                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1505                         "User Guide for details on firmware recovery mode.");
1506                 return -EIO;
1507         }
1508
1509         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
        /* Check whether multi-driver support is needed */
1511         i40e_support_multi_driver(dev);
1512         /* Check if users want the latest supported vec path */
1513         i40e_use_latest_vec(dev);
1514
1515         /* Make sure all is clean before doing PF reset */
1516         i40e_clear_hw(hw);
1517
1518         /* Reset here to make sure all is clean for each PF */
1519         ret = i40e_pf_reset(hw);
1520         if (ret) {
1521                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1522                 return ret;
1523         }
1524
1525         /* Initialize the shared code (base driver) */
1526         ret = i40e_init_shared_code(hw);
1527         if (ret) {
1528                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1529                 return ret;
1530         }
1531
1532         /* Initialize the parameters for adminq */
1533         i40e_init_adminq_parameter(hw);
1534         ret = i40e_init_adminq(hw);
1535         if (ret != I40E_SUCCESS) {
1536                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1537                 return -EIO;
1538         }
1539         /* Firmware of SFP x722 does not support adminq option */
1540         if (hw->device_id == I40E_DEV_ID_SFP_X722)
1541                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1542
1543         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1544                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1545                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1546                      ((hw->nvm.version >> 12) & 0xf),
1547                      ((hw->nvm.version >> 4) & 0xff),
1548                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1549
1550         /* Initialize the hardware */
1551         i40e_hw_init(dev);
1552
1553         i40e_config_automask(pf);
1554
1555         i40e_set_default_pctype_table(dev);
1556
1557         /*
1558          * To work around the NVM issue, initialize registers
1559          * for packet type of QinQ by software.
1560          * It should be removed once issues are fixed in NVM.
1561          */
1562         if (!pf->support_multi_driver)
1563                 i40e_GLQF_reg_init(hw);
1564
1565         /* Initialize the input set for filters (hash and fd) to default value */
1566         i40e_filter_input_set_init(pf);
1567
1568         /* initialise the L3_MAP register */
1569         if (!pf->support_multi_driver) {
1570                 ret = i40e_aq_debug_write_global_register(hw,
1571                                                    I40E_GLQF_L3_MAP(40),
1572                                                    0x00000028,  NULL);
1573                 if (ret)
1574                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1575                                      ret);
1576                 PMD_INIT_LOG(DEBUG,
                             "Global register 0x%08x is set to 0x28",
1578                              I40E_GLQF_L3_MAP(40));
1579         }
1580
1581         /* Need the special FW version to support floating VEB */
1582         config_floating_veb(dev);
1583         /* Clear PXE mode */
1584         i40e_clear_pxe_mode(hw);
1585         i40e_dev_sync_phy_type(hw);
1586
        /*
         * On X710, performance numbers are far from expectations on recent
         * firmware versions, and the fix for this issue may not be integrated
         * in the following firmware version, so a workaround in the software
         * driver is needed. It modifies the initial values of 3 internal-only
         * registers. Note that the workaround can be removed once it is fixed
         * in firmware.
         */
1595         i40e_configure_registers(hw);
1596
1597         /* Get hw capabilities */
1598         ret = i40e_get_cap(hw);
1599         if (ret != I40E_SUCCESS) {
1600                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1601                 goto err_get_capabilities;
1602         }
1603
1604         /* Initialize parameters for PF */
1605         ret = i40e_pf_parameter_init(dev);
1606         if (ret != 0) {
1607                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1608                 goto err_parameter_init;
1609         }
1610
1611         /* Initialize the queue management */
1612         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1613         if (ret < 0) {
1614                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1615                 goto err_qp_pool_init;
1616         }
1617         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1618                                 hw->func_caps.num_msix_vectors - 1);
1619         if (ret < 0) {
1620                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1621                 goto err_msix_pool_init;
1622         }
1623
1624         /* Initialize lan hmc */
1625         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1626                                 hw->func_caps.num_rx_qp, 0, 0);
1627         if (ret != I40E_SUCCESS) {
1628                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1629                 goto err_init_lan_hmc;
1630         }
1631
1632         /* Configure lan hmc */
1633         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1634         if (ret != I40E_SUCCESS) {
1635                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1636                 goto err_configure_lan_hmc;
1637         }
1638
1639         /* Get and check the mac address */
1640         i40e_get_mac_addr(hw, hw->mac.addr);
1641         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1642                 PMD_INIT_LOG(ERR, "mac address is not valid");
1643                 ret = -EIO;
1644                 goto err_get_mac_addr;
1645         }
1646         /* Copy the permanent MAC address */
1647         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1648                         (struct rte_ether_addr *)hw->mac.perm_addr);
1649
1650         /* Disable flow control */
1651         hw->fc.requested_mode = I40E_FC_NONE;
1652         i40e_set_fc(hw, &aq_fail, TRUE);
1653
1654         /* Set the global registers with default ether type value */
1655         if (!pf->support_multi_driver) {
1656                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1657                                          RTE_ETHER_TYPE_VLAN);
1658                 if (ret != I40E_SUCCESS) {
1659                         PMD_INIT_LOG(ERR,
1660                                      "Failed to set the default outer "
1661                                      "VLAN ether type");
1662                         goto err_setup_pf_switch;
1663                 }
1664         }
1665
1666         /* PF setup, which includes VSI setup */
1667         ret = i40e_pf_setup(pf);
1668         if (ret) {
1669                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1670                 goto err_setup_pf_switch;
1671         }
1672
1673         vsi = pf->main_vsi;
1674
1675         /* Disable double vlan by default */
1676         i40e_vsi_config_double_vlan(vsi, FALSE);
1677
1678         /* Disable S-TAG identification when floating_veb is disabled */
1679         if (!pf->floating_veb) {
1680                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1681                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1682                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1683                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1684                 }
1685         }
1686
1687         if (!vsi->max_macaddrs)
1688                 len = RTE_ETHER_ADDR_LEN;
1689         else
1690                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1691
        /* Should be done after the VSI is initialized */
        dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate memory for storing mac address");
                ret = -ENOMEM;
                goto err_mac_alloc;
        }
1699         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1700                                         &dev->data->mac_addrs[0]);
1701
1702         /* Init dcb to sw mode by default */
1703         ret = i40e_dcb_init_configure(dev, TRUE);
1704         if (ret != I40E_SUCCESS) {
1705                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1706                 pf->flags &= ~I40E_FLAG_DCB;
1707         }
1708         /* Update HW struct after DCB configuration */
1709         i40e_get_cap(hw);
1710
1711         /* initialize pf host driver to setup SRIOV resource if applicable */
1712         i40e_pf_host_init(dev);
1713
1714         /* register callback func to eal lib */
1715         rte_intr_callback_register(intr_handle,
1716                                    i40e_dev_interrupt_handler, dev);
1717
1718         /* configure and enable device interrupt */
1719         i40e_pf_config_irq0(hw, TRUE);
1720         i40e_pf_enable_irq0(hw);
1721
1722         /* enable uio intr after callback register */
1723         rte_intr_enable(intr_handle);
1724
1725         /* By default disable flexible payload in global configuration */
1726         if (!pf->support_multi_driver)
1727                 i40e_flex_payload_reg_set_default(hw);
1728
1729         /*
1730          * Add an ethertype filter to drop all flow control frames transmitted
1731          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1732          * frames to wire.
1733          */
1734         i40e_add_tx_flow_control_drop_filter(pf);
1735
1736         /* Set the max frame size to 0x2600 by default,
1737          * in case other drivers changed the default value.
1738          */
1739         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1740
1741         /* initialize mirror rule list */
1742         TAILQ_INIT(&pf->mirror_list);
1743
1744         /* initialize RSS rule list */
1745         TAILQ_INIT(&pf->rss_config_list);
1746
1747         /* initialize Traffic Manager configuration */
1748         i40e_tm_conf_init(dev);
1749
1750         /* Initialize customized information */
1751         i40e_init_customized_info(pf);
1752
1753         /* Initialize the filter invalidation configuration */
1754         i40e_init_filter_invalidation(pf);
1755
1756         ret = i40e_init_ethtype_filter_list(dev);
1757         if (ret < 0)
1758                 goto err_init_ethtype_filter_list;
1759         ret = i40e_init_tunnel_filter_list(dev);
1760         if (ret < 0)
1761                 goto err_init_tunnel_filter_list;
1762         ret = i40e_init_fdir_filter_list(dev);
1763         if (ret < 0)
1764                 goto err_init_fdir_filter_list;
1765
1766         /* initialize queue region configuration */
1767         i40e_init_queue_region_conf(dev);
1768
1769         /* initialize RSS configuration from rte_flow */
1770         memset(&pf->rss_info, 0,
1771                 sizeof(struct i40e_rte_flow_rss_conf));
1772
1773         /* reset all stats of the device, including pf and main vsi */
1774         i40e_dev_stats_reset(dev);
1775
1776         return 0;
1777
1778 err_init_fdir_filter_list:
1779         rte_free(pf->tunnel.hash_table);
1780         rte_free(pf->tunnel.hash_map);
1781 err_init_tunnel_filter_list:
1782         rte_free(pf->ethertype.hash_table);
1783         rte_free(pf->ethertype.hash_map);
1784 err_init_ethtype_filter_list:
1785         rte_free(dev->data->mac_addrs);
1786         dev->data->mac_addrs = NULL;
1787 err_mac_alloc:
1788         i40e_vsi_release(pf->main_vsi);
1789 err_setup_pf_switch:
1790 err_get_mac_addr:
1791 err_configure_lan_hmc:
1792         (void)i40e_shutdown_lan_hmc(hw);
1793 err_init_lan_hmc:
1794         i40e_res_pool_destroy(&pf->msix_pool);
1795 err_msix_pool_init:
1796         i40e_res_pool_destroy(&pf->qp_pool);
1797 err_qp_pool_init:
1798 err_parameter_init:
1799 err_get_capabilities:
1800         (void)i40e_shutdown_adminq(hw);
1801
1802         return ret;
1803 }
1804
1805 static void
1806 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1807 {
1808         struct i40e_ethertype_filter *p_ethertype;
1809         struct i40e_ethertype_rule *ethertype_rule;
1810
1811         ethertype_rule = &pf->ethertype;
1812         /* Remove all ethertype filter rules and hash */
1813         if (ethertype_rule->hash_map)
1814                 rte_free(ethertype_rule->hash_map);
1815         if (ethertype_rule->hash_table)
1816                 rte_hash_free(ethertype_rule->hash_table);
1817
1818         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1819                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1820                              p_ethertype, rules);
1821                 rte_free(p_ethertype);
1822         }
1823 }
1824
1825 static void
1826 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1827 {
1828         struct i40e_tunnel_filter *p_tunnel;
1829         struct i40e_tunnel_rule *tunnel_rule;
1830
1831         tunnel_rule = &pf->tunnel;
1832         /* Remove all tunnel director rules and hash */
1833         if (tunnel_rule->hash_map)
1834                 rte_free(tunnel_rule->hash_map);
1835         if (tunnel_rule->hash_table)
1836                 rte_hash_free(tunnel_rule->hash_table);
1837
1838         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1839                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1840                 rte_free(p_tunnel);
1841         }
1842 }
1843
1844 static void
1845 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1846 {
1847         struct i40e_fdir_filter *p_fdir;
1848         struct i40e_fdir_info *fdir_info;
1849
1850         fdir_info = &pf->fdir;
1851
1852         /* Remove all flow director rules */
1853         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1854                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1855 }
1856
1857 static void
1858 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1859 {
1860         struct i40e_fdir_info *fdir_info;
1861
1862         fdir_info = &pf->fdir;
1863
1864         /* flow director memory cleanup */
1865         if (fdir_info->hash_map)
1866                 rte_free(fdir_info->hash_map);
1867         if (fdir_info->hash_table)
1868                 rte_hash_free(fdir_info->hash_table);
1869         if (fdir_info->fdir_flow_pool.bitmap)
1870                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1871         if (fdir_info->fdir_flow_pool.pool)
1872                 rte_free(fdir_info->fdir_flow_pool.pool);
1873         if (fdir_info->fdir_filter_array)
1874                 rte_free(fdir_info->fdir_filter_array);
1875 }
1876
1877 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1878 {
        /*
         * By default, disable flexible payload
         * for the corresponding L2/L3/L4 layers.
         */
1883         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1884         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1885         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1886 }
1887
1888 static int
1889 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1890 {
1891         struct i40e_hw *hw;
1892
1893         PMD_INIT_FUNC_TRACE();
1894
1895         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1896                 return 0;
1897
1898         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1899
1900         if (hw->adapter_closed == 0)
1901                 i40e_dev_close(dev);
1902
1903         return 0;
1904 }
1905
1906 static int
1907 i40e_dev_configure(struct rte_eth_dev *dev)
1908 {
1909         struct i40e_adapter *ad =
1910                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1911         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1912         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1913         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1914         int i, ret;
1915
1916         ret = i40e_dev_sync_phy_type(hw);
1917         if (ret)
1918                 return ret;
1919
        /* Initialize to TRUE. If any of the Rx queues doesn't meet the
         * bulk allocation or vector Rx preconditions, we will reset it.
         */
1923         ad->rx_bulk_alloc_allowed = true;
1924         ad->rx_vec_allowed = true;
1925         ad->tx_simple_allowed = true;
1926         ad->tx_vec_allowed = true;
1927
1928         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1929                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1930
        /* Only the legacy filter API needs the following fdir config. So when
         * the legacy filter API is deprecated, the following code should also
         * be removed.
         */
1935         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1936                 ret = i40e_fdir_setup(pf);
1937                 if (ret != I40E_SUCCESS) {
1938                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1939                         return -ENOTSUP;
1940                 }
1941                 ret = i40e_fdir_configure(dev);
1942                 if (ret < 0) {
1943                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1944                         goto err;
1945                 }
1946         } else
1947                 i40e_fdir_teardown(pf);
1948
1949         ret = i40e_dev_init_vlan(dev);
1950         if (ret < 0)
1951                 goto err;
1952
        /* VMDQ setup.
         *  The general PMD call sequence is NIC init, configure,
         *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
         *  VSI that a specific queue belongs to when VMDQ is applicable, so
         *  VMDQ setup has to be done before rx/tx_queue_setup() and this
         *  function is a good place for it.
         *  RSS setup needs the actual number of configured RX queues, which
         *  is only known after rx_queue_setup(), so dev_start() is a good
         *  place for RSS setup.
         */
1963         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1964                 ret = i40e_vmdq_setup(dev);
1965                 if (ret)
1966                         goto err;
1967         }
1968
1969         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1970                 ret = i40e_dcb_setup(dev);
1971                 if (ret) {
1972                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1973                         goto err_dcb;
1974                 }
1975         }
1976
1977         TAILQ_INIT(&pf->flow_list);
1978
1979         return 0;
1980
1981 err_dcb:
        /* need to release VMDq resources if they exist */
1983         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1984                 i40e_vsi_release(pf->vmdq[i].vsi);
1985                 pf->vmdq[i].vsi = NULL;
1986         }
1987         rte_free(pf->vmdq);
1988         pf->vmdq = NULL;
1989 err:
        /* Need to release fdir resources if they exist.
         * Only the legacy filter API needs the following fdir config, so when
         * the legacy filter API is deprecated, the following code should also
         * be removed.
         */
1995         i40e_fdir_teardown(pf);
1996         return ret;
1997 }
1998
1999 void
2000 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
2001 {
2002         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2003         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2004         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2005         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2006         uint16_t msix_vect = vsi->msix_intr;
2007         uint16_t i;
2008
2009         for (i = 0; i < vsi->nb_qps; i++) {
2010                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2011                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2012                 rte_wmb();
2013         }
2014
2015         if (vsi->type != I40E_VSI_SRIOV) {
2016                 if (!rte_intr_allow_others(intr_handle)) {
2017                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2018                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2019                         I40E_WRITE_REG(hw,
2020                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2021                                        0);
2022                 } else {
2023                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2024                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2025                         I40E_WRITE_REG(hw,
2026                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2027                                                        msix_vect - 1), 0);
2028                 }
2029         } else {
2030                 uint32_t reg;
2031                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2032                         vsi->user_param + (msix_vect - 1);
2033
2034                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2035                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2036         }
2037         I40E_WRITE_FLUSH(hw);
2038 }
2039
2040 static void
2041 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2042                        int base_queue, int nb_queue,
2043                        uint16_t itr_idx)
2044 {
2045         int i;
2046         uint32_t val;
2047         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2048         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2049
2050         /* Bind all RX queues to allocated MSIX interrupt */
2051         for (i = 0; i < nb_queue; i++) {
2052                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2053                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2054                         ((base_queue + i + 1) <<
2055                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2056                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2057                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2058
2059                 if (i == nb_queue - 1)
2060                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2061                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2062         }
2063
2064         /* Write first RX queue to Link list register as the head element */
2065         if (vsi->type != I40E_VSI_SRIOV) {
2066                 uint16_t interval =
2067                         i40e_calc_itr_interval(1, pf->support_multi_driver);
2068
2069                 if (msix_vect == I40E_MISC_VEC_ID) {
2070                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2071                                        (base_queue <<
2072                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2073                                        (0x0 <<
2074                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2075                         I40E_WRITE_REG(hw,
2076                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2077                                        interval);
2078                 } else {
2079                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2080                                        (base_queue <<
2081                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2082                                        (0x0 <<
2083                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2084                         I40E_WRITE_REG(hw,
2085                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2086                                                        msix_vect - 1),
2087                                        interval);
2088                 }
2089         } else {
2090                 uint32_t reg;
2091
2092                 if (msix_vect == I40E_MISC_VEC_ID) {
2093                         I40E_WRITE_REG(hw,
2094                                        I40E_VPINT_LNKLST0(vsi->user_param),
2095                                        (base_queue <<
2096                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2097                                        (0x0 <<
2098                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2099                 } else {
                        /* num_msix_vectors_vf needs to exclude irq0 */
2101                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2102                                 vsi->user_param + (msix_vect - 1);
2103
2104                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2105                                        (base_queue <<
2106                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2107                                        (0x0 <<
2108                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2109                 }
2110         }
2111
2112         I40E_WRITE_FLUSH(hw);
2113 }
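
/*
 * Note on __vsi_queues_bind_intr() above: the hardware chains all queues of
 * one MSI-X vector through the NEXTQ_INDX field of each QINT_RQCTL entry,
 * the last queue terminating the chain via NEXTQ_INDX_MASK. The matching
 * PFINT/VPINT link-list register holds the head of the chain, and for
 * non-SRIOV VSIs the ITR register carries the throttling interval computed
 * by i40e_calc_itr_interval().
 */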
2114
2115 int
2116 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2117 {
2118         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2119         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2120         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2121         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2122         uint16_t msix_vect = vsi->msix_intr;
2123         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2124         uint16_t queue_idx = 0;
2125         int record = 0;
2126         int i;
2127
2128         for (i = 0; i < vsi->nb_qps; i++) {
2129                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2130                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2131         }
2132
2133         /* VF bind interrupt */
2134         if (vsi->type == I40E_VSI_SRIOV) {
2135                 if (vsi->nb_msix == 0) {
2136                         PMD_DRV_LOG(ERR, "No msix resource");
2137                         return -EINVAL;
2138                 }
2139                 __vsi_queues_bind_intr(vsi, msix_vect,
2140                                        vsi->base_queue, vsi->nb_qps,
2141                                        itr_idx);
2142                 return 0;
2143         }
2144
2145         /* PF & VMDq bind interrupt */
2146         if (rte_intr_dp_is_en(intr_handle)) {
2147                 if (vsi->type == I40E_VSI_MAIN) {
2148                         queue_idx = 0;
2149                         record = 1;
2150                 } else if (vsi->type == I40E_VSI_VMDQ2) {
2151                         struct i40e_vsi *main_vsi =
2152                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2153                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
2154                         record = 1;
2155                 }
2156         }
2157
2158         for (i = 0; i < vsi->nb_used_qps; i++) {
2159                 if (vsi->nb_msix == 0) {
2160                         PMD_DRV_LOG(ERR, "No msix resource");
2161                         return -EINVAL;
2162                 } else if (nb_msix <= 1) {
2163                         if (!rte_intr_allow_others(intr_handle))
2164                                 /* allow to share MISC_VEC_ID */
2165                                 msix_vect = I40E_MISC_VEC_ID;
2166
                        /* not enough msix_vect, map all to one */
2168                         __vsi_queues_bind_intr(vsi, msix_vect,
2169                                                vsi->base_queue + i,
2170                                                vsi->nb_used_qps - i,
2171                                                itr_idx);
2172                         for (; !!record && i < vsi->nb_used_qps; i++)
2173                                 intr_handle->intr_vec[queue_idx + i] =
2174                                         msix_vect;
2175                         break;
2176                 }
2177                 /* 1:1 queue/msix_vect mapping */
2178                 __vsi_queues_bind_intr(vsi, msix_vect,
2179                                        vsi->base_queue + i, 1,
2180                                        itr_idx);
2181                 if (!!record)
2182                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
2183
2184                 msix_vect++;
2185                 nb_msix--;
2186         }
2187
2188         return 0;
2189 }
2190
2191 void
2192 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2193 {
2194         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2195         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2196         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2197         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2198         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2199         uint16_t msix_intr, i;
2200
2201         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2202                 for (i = 0; i < vsi->nb_msix; i++) {
2203                         msix_intr = vsi->msix_intr + i;
2204                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2205                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2206                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2207                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2208                 }
2209         else
2210                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2211                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2212                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2213                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2214
2215         I40E_WRITE_FLUSH(hw);
2216 }
2217
2218 void
2219 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2220 {
2221         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2222         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2223         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2224         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2225         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2226         uint16_t msix_intr, i;
2227
2228         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2229                 for (i = 0; i < vsi->nb_msix; i++) {
2230                         msix_intr = vsi->msix_intr + i;
2231                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2232                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2233                 }
2234         else
2235                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2236                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2237
2238         I40E_WRITE_FLUSH(hw);
2239 }
2240
2241 static inline uint8_t
2242 i40e_parse_link_speeds(uint16_t link_speeds)
2243 {
2244         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2245
2246         if (link_speeds & ETH_LINK_SPEED_40G)
2247                 link_speed |= I40E_LINK_SPEED_40GB;
2248         if (link_speeds & ETH_LINK_SPEED_25G)
2249                 link_speed |= I40E_LINK_SPEED_25GB;
2250         if (link_speeds & ETH_LINK_SPEED_20G)
2251                 link_speed |= I40E_LINK_SPEED_20GB;
2252         if (link_speeds & ETH_LINK_SPEED_10G)
2253                 link_speed |= I40E_LINK_SPEED_10GB;
2254         if (link_speeds & ETH_LINK_SPEED_1G)
2255                 link_speed |= I40E_LINK_SPEED_1GB;
2256         if (link_speeds & ETH_LINK_SPEED_100M)
2257                 link_speed |= I40E_LINK_SPEED_100MB;
2258
2259         return link_speed;
2260 }
2261
2262 static int
2263 i40e_phy_conf_link(struct i40e_hw *hw,
2264                    uint8_t abilities,
2265                    uint8_t force_speed,
2266                    bool is_up)
2267 {
2268         enum i40e_status_code status;
2269         struct i40e_aq_get_phy_abilities_resp phy_ab;
2270         struct i40e_aq_set_phy_config phy_conf;
2271         enum i40e_aq_phy_type cnt;
2272         uint8_t avail_speed;
2273         uint32_t phy_type_mask = 0;
2274
        const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
                        I40E_AQ_PHY_FLAG_PAUSE_RX |
                        I40E_AQ_PHY_FLAG_LOW_POWER;
2279         int ret = -ENOTSUP;
2280
2281         /* To get phy capabilities of available speeds. */
2282         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2283                                               NULL);
2284         if (status) {
2285                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2286                                 status);
2287                 return ret;
2288         }
2289         avail_speed = phy_ab.link_speed;
2290
2291         /* To get the current phy config. */
2292         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2293                                               NULL);
2294         if (status) {
2295                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2296                                 status);
2297                 return ret;
2298         }
2299
        /* If the link needs to go up, autoneg is enabled, and a speed is
         * already configured, there is no need to set it up again.
         */
2303         if (is_up && phy_ab.phy_type != 0 &&
2304                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2305                      phy_ab.link_speed != 0)
2306                 return I40E_SUCCESS;
2307
2308         memset(&phy_conf, 0, sizeof(phy_conf));
2309
2310         /* bits 0-2 use the values from get_phy_abilities_resp */
2311         abilities &= ~mask;
2312         abilities |= phy_ab.abilities & mask;
2313
2314         phy_conf.abilities = abilities;
2315
        /* If the link needs to go up but the forced speed is not supported,
         * warn users and configure the default available speeds.
         */
2319         if (is_up && !(force_speed & avail_speed)) {
2320                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2321                 phy_conf.link_speed = avail_speed;
2322         } else {
2323                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2324         }
2325
2326         /* PHY type mask needs to include each type except PHY type extension */
2327         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2328                 phy_type_mask |= 1 << cnt;
2329
2330         /* use get_phy_abilities_resp value for the rest */
2331         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2332         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2333                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2334                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2335         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2336         phy_conf.eee_capability = phy_ab.eee_capability;
2337         phy_conf.eeer = phy_ab.eeer_val;
2338         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2339
2340         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2341                     phy_ab.abilities, phy_ab.link_speed);
2342         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2343                     phy_conf.abilities, phy_conf.link_speed);
2344
2345         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2346         if (status)
2347                 return ret;
2348
2349         return I40E_SUCCESS;
2350 }
2351
2352 static int
2353 i40e_apply_link_speed(struct rte_eth_dev *dev)
2354 {
2355         uint8_t speed;
2356         uint8_t abilities = 0;
2357         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2358         struct rte_eth_conf *conf = &dev->data->dev_conf;
2359
2360         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2361                      I40E_AQ_PHY_LINK_ENABLED;
2362
2363         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2364                 conf->link_speeds = ETH_LINK_SPEED_40G |
2365                                     ETH_LINK_SPEED_25G |
2366                                     ETH_LINK_SPEED_20G |
2367                                     ETH_LINK_SPEED_10G |
2368                                     ETH_LINK_SPEED_1G |
2369                                     ETH_LINK_SPEED_100M;
2370
2371                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2372         } else {
2373                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2374         }
2375         speed = i40e_parse_link_speeds(conf->link_speeds);
2376
2377         return i40e_phy_conf_link(hw, abilities, speed, true);
2378 }
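
/*
 * Illustrative application-side sketch, not part of the PMD: forcing a fixed
 * 10G link instead of autonegotiation. The function name and port id are
 * hypothetical; rte_eth_dev_configure() and the ETH_LINK_SPEED_* flags are
 * the real ethdev API. With a non-AUTONEG link_speeds value,
 * i40e_apply_link_speed() above clears I40E_AQ_PHY_AN_ENABLED and forces the
 * parsed speed.
 */
static int
i40e_example_fixed_10g(uint16_t port_id)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G;

        /* One Rx and one Tx queue is enough for the example. */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}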
2379
2380 static int
2381 i40e_dev_start(struct rte_eth_dev *dev)
2382 {
2383         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2384         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2385         struct i40e_vsi *main_vsi = pf->main_vsi;
2386         int ret, i;
2387         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2388         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2389         uint32_t intr_vector = 0;
2390         struct i40e_vsi *vsi;
2391         uint16_t nb_rxq, nb_txq;
2392
2393         hw->adapter_stopped = 0;
2394
2395         rte_intr_disable(intr_handle);
2396
2397         if ((rte_intr_cap_multiple(intr_handle) ||
2398              !RTE_ETH_DEV_SRIOV(dev).active) &&
2399             dev->data->dev_conf.intr_conf.rxq != 0) {
2400                 intr_vector = dev->data->nb_rx_queues;
2401                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2402                 if (ret)
2403                         return ret;
2404         }
2405
2406         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2407                 intr_handle->intr_vec =
2408                         rte_zmalloc("intr_vec",
2409                                     dev->data->nb_rx_queues * sizeof(int),
2410                                     0);
2411                 if (!intr_handle->intr_vec) {
2412                         PMD_INIT_LOG(ERR,
2413                                 "Failed to allocate %d rx_queues intr_vec",
2414                                 dev->data->nb_rx_queues);
2415                         return -ENOMEM;
2416                 }
2417         }
2418
2419         /* Initialize VSI */
2420         ret = i40e_dev_rxtx_init(pf);
2421         if (ret != I40E_SUCCESS) {
2422                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2423                 return ret;
2424         }
2425
2426         /* Map queues with MSIX interrupt */
2427         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2428                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2429         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2430         if (ret < 0)
2431                 return ret;
2432         i40e_vsi_enable_queues_intr(main_vsi);
2433
2434         /* Map VMDQ VSI queues with MSIX interrupt */
2435         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2436                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2437                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2438                                                 I40E_ITR_INDEX_DEFAULT);
2439                 if (ret < 0)
2440                         return ret;
2441                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2442         }
2443
2444         /* Enable all queues which have been configured */
2445         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2446                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2447                 if (ret)
2448                         goto rx_err;
2449         }
2450
2451         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2452                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2453                 if (ret)
2454                         goto tx_err;
2455         }
2456
2457         /* Enable receiving broadcast packets */
2458         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2459         if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2461
2462         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2463                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2464                                                 true, NULL);
2465                 if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
2467         }
2468
2469         /* Enable the VLAN promiscuous mode. */
2470         if (pf->vfs) {
2471                 for (i = 0; i < pf->vf_num; i++) {
2472                         vsi = pf->vfs[i].vsi;
2473                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2474                                                      true, NULL);
2475                 }
2476         }
2477
2478         /* Enable mac loopback mode */
2479         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2480             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2481                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2482                 if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "failed to set loopback link");
2484                         goto tx_err;
2485                 }
2486         }
2487
2488         /* Apply link configure */
2489         ret = i40e_apply_link_speed(dev);
2490         if (I40E_SUCCESS != ret) {
                PMD_DRV_LOG(ERR, "Failed to apply link setting");
2492                 goto tx_err;
2493         }
2494
2495         if (!rte_intr_allow_others(intr_handle)) {
2496                 rte_intr_callback_unregister(intr_handle,
2497                                              i40e_dev_interrupt_handler,
2498                                              (void *)dev);
2499                 /* configure and enable device interrupt */
2500                 i40e_pf_config_irq0(hw, FALSE);
2501                 i40e_pf_enable_irq0(hw);
2502
2503                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2504                         PMD_INIT_LOG(INFO,
                                "lsc won't be enabled: no interrupt multiplexing available");
2506         } else {
2507                 ret = i40e_aq_set_phy_int_mask(hw,
2508                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2509                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2510                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2511                 if (ret != I40E_SUCCESS)
                        PMD_DRV_LOG(WARNING, "Failed to set phy mask");
2513
                /* Call get_link_info aq command to enable/disable LSE */
2515                 i40e_dev_link_update(dev, 0);
2516         }
2517
2518         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2519                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2520                                   i40e_dev_alarm_handler, dev);
2521         } else {
2522                 /* enable uio intr after callback register */
2523                 rte_intr_enable(intr_handle);
2524         }
2525
2526         i40e_filter_restore(pf);
2527
2528         if (pf->tm_conf.root && !pf->tm_conf.committed)
2529                 PMD_DRV_LOG(WARNING,
2530                             "please call hierarchy_commit() "
2531                             "before starting the port");
2532
2533         return I40E_SUCCESS;
2534
2535 tx_err:
2536         for (i = 0; i < nb_txq; i++)
2537                 i40e_dev_tx_queue_stop(dev, i);
2538 rx_err:
2539         for (i = 0; i < nb_rxq; i++)
2540                 i40e_dev_rx_queue_stop(dev, i);
2541
2542         return ret;
2543 }
2544
2545 static void
2546 i40e_dev_stop(struct rte_eth_dev *dev)
2547 {
2548         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2549         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2550         struct i40e_vsi *main_vsi = pf->main_vsi;
2551         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2552         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2553         int i;
2554
2555         if (hw->adapter_stopped == 1)
2556                 return;
2557
2558         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2559                 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2560                 rte_intr_enable(intr_handle);
2561         }
2562
2563         /* Disable all queues */
2564         for (i = 0; i < dev->data->nb_tx_queues; i++)
2565                 i40e_dev_tx_queue_stop(dev, i);
2566
2567         for (i = 0; i < dev->data->nb_rx_queues; i++)
2568                 i40e_dev_rx_queue_stop(dev, i);
2569
2570         /* unmap queues from the interrupt registers */
2571         i40e_vsi_disable_queues_intr(main_vsi);
2572         i40e_vsi_queues_unbind_intr(main_vsi);
2573
2574         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2575                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2576                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2577         }
2578
2579         /* Clear all queues and release memory */
2580         i40e_dev_clear_queues(dev);
2581
2582         /* Set link down */
2583         i40e_dev_set_link_down(dev);
2584
2585         if (!rte_intr_allow_others(intr_handle))
2586                 /* resume to the default handler */
2587                 rte_intr_callback_register(intr_handle,
2588                                            i40e_dev_interrupt_handler,
2589                                            (void *)dev);
2590
2591         /* Clean up the datapath event and queue/vector mapping */
2592         rte_intr_efd_disable(intr_handle);
2593         if (intr_handle->intr_vec) {
2594                 rte_free(intr_handle->intr_vec);
2595                 intr_handle->intr_vec = NULL;
2596         }
2597
2598         /* reset hierarchy commit */
2599         pf->tm_conf.committed = false;
2600
2601         hw->adapter_stopped = 1;
2602         dev->data->dev_started = 0;
2603
2604         pf->adapter->rss_reta_updated = 0;
2605 }
2606
2607 static int
2608 i40e_dev_close(struct rte_eth_dev *dev)
2609 {
2610         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2611         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2612         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2613         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2614         struct i40e_mirror_rule *p_mirror;
2615         struct i40e_filter_control_settings settings;
2616         struct rte_flow *p_flow;
2617         uint32_t reg;
2618         int i;
2619         int ret;
2620         uint8_t aq_fail = 0;
2621         int retries = 0;
2622
2623         PMD_INIT_FUNC_TRACE();
2624         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2625                 return 0;
2626
2627         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2628         if (ret)
2629                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2630
2631
2632         i40e_dev_stop(dev);
2633
2634         /* Remove all mirror rules */
2635         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2636                 ret = i40e_aq_del_mirror_rule(hw,
2637                                               pf->main_vsi->veb->seid,
2638                                               p_mirror->rule_type,
2639                                               p_mirror->entries,
2640                                               p_mirror->num_entries,
2641                                               p_mirror->id);
2642                 if (ret < 0)
2643                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2644                                     "status = %d, aq_err = %d.", ret,
2645                                     hw->aq.asq_last_status);
2646
2647                 /* remove the mirror rule's software resources regardless */
2648                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2649                 rte_free(p_mirror);
2650                 pf->nb_mirror_rule--;
2651         }
2652
2653         i40e_dev_free_queues(dev);
2654
2655         /* Disable interrupt */
2656         i40e_pf_disable_irq0(hw);
2657         rte_intr_disable(intr_handle);
2658
2659         /*
2660          * Only legacy filter API needs the following fdir config. So when the
2661          * legacy filter API is deprecated, the following code should also be
2662          * removed.
2663          */
2664         i40e_fdir_teardown(pf);
2665
2666         /* shutdown and destroy the HMC */
2667         i40e_shutdown_lan_hmc(hw);
2668
2669         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2670                 i40e_vsi_release(pf->vmdq[i].vsi);
2671                 pf->vmdq[i].vsi = NULL;
2672         }
2673         rte_free(pf->vmdq);
2674         pf->vmdq = NULL;
2675
2676         /* release all the existing VSIs and VEBs */
2677         i40e_vsi_release(pf->main_vsi);
2678
2679         /* shutdown the adminq */
2680         i40e_aq_queue_shutdown(hw, true);
2681         i40e_shutdown_adminq(hw);
2682
2683         i40e_res_pool_destroy(&pf->qp_pool);
2684         i40e_res_pool_destroy(&pf->msix_pool);
2685
2686         /* Disable flexible payload in global configuration */
2687         if (!pf->support_multi_driver)
2688                 i40e_flex_payload_reg_set_default(hw);
2689
2690         /* force a PF reset to clean anything leftover */
2691         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2692         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2693                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2694         I40E_WRITE_FLUSH(hw);
2695
2696         dev->dev_ops = NULL;
2697         dev->rx_pkt_burst = NULL;
2698         dev->tx_pkt_burst = NULL;
2699
2700         /* Clear PXE mode */
2701         i40e_clear_pxe_mode(hw);
2702
2703         /* Unconfigure filter control */
2704         memset(&settings, 0, sizeof(settings));
2705         ret = i40e_set_filter_control(hw, &settings);
2706         if (ret)
2707                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2708                                         ret);
2709
2710         /* Disable flow control */
2711         hw->fc.requested_mode = I40E_FC_NONE;
2712         i40e_set_fc(hw, &aq_fail, TRUE);
2713
2714         /* uninitialize pf host driver */
2715         i40e_pf_host_uninit(dev);
2716
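             /*
              * Unregister the interrupt callback with a bounded retry:
              * rte_intr_callback_unregister() can return -EAGAIN while the
              * callback is still executing, so the loop retries up to 5
              * times with a 500 ms delay (about 2.5 s total) before giving
              * up.
              */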
2717         do {
2718                 ret = rte_intr_callback_unregister(intr_handle,
2719                                 i40e_dev_interrupt_handler, dev);
2720                 if (ret >= 0 || ret == -ENOENT) {
2721                         break;
2722                 } else if (ret != -EAGAIN) {
2723                         PMD_INIT_LOG(ERR,
2724                                  "intr callback unregister failed: %d",
2725                                  ret);
2726                 }
2727                 i40e_msec_delay(500);
2728         } while (retries++ < 5);
2729
2730         i40e_rm_ethtype_filter_list(pf);
2731         i40e_rm_tunnel_filter_list(pf);
2732         i40e_rm_fdir_filter_list(pf);
2733
2734         /* Remove all flows */
2735         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2736                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2737                 /* Do not free FDIR flows since they are statically allocated */
2738                 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2739                         rte_free(p_flow);
2740         }
2741
2742         /* release the statically allocated fdir memory */
2743         i40e_fdir_memory_cleanup(pf);
2744
2745         /* Remove all Traffic Manager configuration */
2746         i40e_tm_conf_uninit(dev);
2747
2748         hw->adapter_closed = 1;
2749         return 0;
2750 }
2751
2752 /*
2753  * Reset PF device only to re-initialize resources in PMD layer
2754  */
2755 static int
2756 i40e_dev_reset(struct rte_eth_dev *dev)
2757 {
2758         int ret;
2759
2760         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2761          * all of its VFs so that they stay aligned with it. The notification
2762          * mechanism is PMD-specific and rather complex for the i40e PF, so to
2763          * avoid unexpected behavior in the VFs, resetting a PF with SR-IOV
2764          * active is currently not supported. It may be supported later.
2765          */
2766         if (dev->data->sriov.active)
2767                 return -ENOTSUP;
2768
2769         ret = eth_i40e_dev_uninit(dev);
2770         if (ret)
2771                 return ret;
2772
2773         ret = eth_i40e_dev_init(dev, NULL);
2774
2775         return ret;
2776 }
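     /*
      * Usage sketch (standard ethdev API, not specific to this file): this
      * dev op backs rte_eth_dev_reset(); after a successful reset the
      * application must reconfigure the port, e.g.
      *
      *    if (rte_eth_dev_reset(port_id) == 0) {
      *            rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
      *            // re-setup Rx/Tx queues, then rte_eth_dev_start(port_id)
      *    }
      */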
2777
2778 static int
2779 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2780 {
2781         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2782         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2783         struct i40e_vsi *vsi = pf->main_vsi;
2784         int status;
2785
2786         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2787                                                      true, NULL, true);
2788         if (status != I40E_SUCCESS) {
2789                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2790                 return -EAGAIN;
2791         }
2792
2793         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2794                                                         TRUE, NULL);
2795         if (status != I40E_SUCCESS) {
2796                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2797                 /* Rollback unicast promiscuous mode */
2798                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2799                                                     false, NULL, true);
2800                 return -EAGAIN;
2801         }
2802
2803         return 0;
2804 }
2805
2806 static int
2807 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2808 {
2809         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2810         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2811         struct i40e_vsi *vsi = pf->main_vsi;
2812         int status;
2813
2814         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2815                                                      false, NULL, true);
2816         if (status != I40E_SUCCESS) {
2817                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2818                 return -EAGAIN;
2819         }
2820
2821         /* multicast promiscuous must stay on while all_multicast is set */
2822         if (dev->data->all_multicast == 1)
2823                 return 0;
2824
2825         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2826                                                         false, NULL);
2827         if (status != I40E_SUCCESS) {
2828                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2829                 /* Rollback unicast promiscuous mode */
2830                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2831                                                     true, NULL, true);
2832                 return -EAGAIN;
2833         }
2834
2835         return 0;
2836 }
2837
2838 static int
2839 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2840 {
2841         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2842         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2843         struct i40e_vsi *vsi = pf->main_vsi;
2844         int ret;
2845
2846         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2847         if (ret != I40E_SUCCESS) {
2848                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2849                 return -EAGAIN;
2850         }
2851
2852         return 0;
2853 }
2854
2855 static int
2856 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2857 {
2858         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2859         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2860         struct i40e_vsi *vsi = pf->main_vsi;
2861         int ret;
2862
2863         if (dev->data->promiscuous == 1)
2864                 return 0; /* promiscuous mode keeps multicast promiscuous on */
2865
2866         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2867                                 vsi->seid, FALSE, NULL);
2868         if (ret != I40E_SUCCESS) {
2869                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2870                 return -EAGAIN;
2871         }
2872
2873         return 0;
2874 }
2875
2876 /*
2877  * Set device link up.
2878  */
2879 static int
2880 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2881 {
2882         /* re-apply link speed setting */
2883         return i40e_apply_link_speed(dev);
2884 }
2885
2886 /*
2887  * Set device link down.
2888  */
2889 static int
2890 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2891 {
2892         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2893         uint8_t abilities = 0;
2894         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2895
2896         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2897         return i40e_phy_conf_link(hw, abilities, speed, false);
2898 }
2899
2900 static __rte_always_inline void
2901 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2902 {
2903 /* Link status registers and values */
2904 #define I40E_PRTMAC_LINKSTA             0x001E2420
2905 #define I40E_REG_LINK_UP                0x40000080
2906 #define I40E_PRTMAC_MACC                0x001E24E0
2907 #define I40E_REG_MACC_25GB              0x00020000
2908 #define I40E_REG_SPEED_MASK             0x38000000
2909 #define I40E_REG_SPEED_0                0x00000000
2910 #define I40E_REG_SPEED_1                0x08000000
2911 #define I40E_REG_SPEED_2                0x10000000
2912 #define I40E_REG_SPEED_3                0x18000000
2913 #define I40E_REG_SPEED_4                0x20000000
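             /*
              * Bits 29:27 of PRTMAC_LINKSTA encode the negotiated speed; the
              * switch below maps: SPEED_0 -> 100M, SPEED_1 -> 1G, SPEED_2 ->
              * 2.5G (X722) or 10G, SPEED_3 -> 5G (X722) or 25G/40G (per
              * PRTMAC_MACC), SPEED_4 -> 10G (X722) or 20G.
              */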
2914         uint32_t link_speed;
2915         uint32_t reg_val;
2916
2917         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2918         link_speed = reg_val & I40E_REG_SPEED_MASK;
2919         reg_val &= I40E_REG_LINK_UP;
2920         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2921
2922         if (unlikely(link->link_status == 0))
2923                 return;
2924
2925         /* Parse the link status */
2926         switch (link_speed) {
2927         case I40E_REG_SPEED_0:
2928                 link->link_speed = ETH_SPEED_NUM_100M;
2929                 break;
2930         case I40E_REG_SPEED_1:
2931                 link->link_speed = ETH_SPEED_NUM_1G;
2932                 break;
2933         case I40E_REG_SPEED_2:
2934                 if (hw->mac.type == I40E_MAC_X722)
2935                         link->link_speed = ETH_SPEED_NUM_2_5G;
2936                 else
2937                         link->link_speed = ETH_SPEED_NUM_10G;
2938                 break;
2939         case I40E_REG_SPEED_3:
2940                 if (hw->mac.type == I40E_MAC_X722) {
2941                         link->link_speed = ETH_SPEED_NUM_5G;
2942                 } else {
2943                         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2944
2945                         if (reg_val & I40E_REG_MACC_25GB)
2946                                 link->link_speed = ETH_SPEED_NUM_25G;
2947                         else
2948                                 link->link_speed = ETH_SPEED_NUM_40G;
2949                 }
2950                 break;
2951         case I40E_REG_SPEED_4:
2952                 if (hw->mac.type == I40E_MAC_X722)
2953                         link->link_speed = ETH_SPEED_NUM_10G;
2954                 else
2955                         link->link_speed = ETH_SPEED_NUM_20G;
2956                 break;
2957         default:
2958                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2959                 break;
2960         }
2961 }
2962
2963 static __rte_always_inline void
2964 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2965         bool enable_lse, int wait_to_complete)
2966 {
2967 #define CHECK_INTERVAL             100  /* 100ms */
2968 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2969         uint32_t rep_cnt = MAX_REPEAT_TIME;
2970         struct i40e_link_status link_status;
2971         int status;
2972
2973         memset(&link_status, 0, sizeof(link_status));
2974
2975         do {
2976                 memset(&link_status, 0, sizeof(link_status));
2977
2978                 /* Get link status information from hardware */
2979                 status = i40e_aq_get_link_info(hw, enable_lse,
2980                                                 &link_status, NULL);
2981                 if (unlikely(status != I40E_SUCCESS)) {
2982                         link->link_speed = ETH_SPEED_NUM_NONE;
2983                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2984                         PMD_DRV_LOG(ERR, "Failed to get link info");
2985                         return;
2986                 }
2987
2988                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2989                 if (!wait_to_complete || link->link_status)
2990                         break;
2991
2992                 rte_delay_ms(CHECK_INTERVAL);
2993         } while (--rep_cnt);
2994
2995         /* Parse the link status */
2996         switch (link_status.link_speed) {
2997         case I40E_LINK_SPEED_100MB:
2998                 link->link_speed = ETH_SPEED_NUM_100M;
2999                 break;
3000         case I40E_LINK_SPEED_1GB:
3001                 link->link_speed = ETH_SPEED_NUM_1G;
3002                 break;
3003         case I40E_LINK_SPEED_10GB:
3004                 link->link_speed = ETH_SPEED_NUM_10G;
3005                 break;
3006         case I40E_LINK_SPEED_20GB:
3007                 link->link_speed = ETH_SPEED_NUM_20G;
3008                 break;
3009         case I40E_LINK_SPEED_25GB:
3010                 link->link_speed = ETH_SPEED_NUM_25G;
3011                 break;
3012         case I40E_LINK_SPEED_40GB:
3013                 link->link_speed = ETH_SPEED_NUM_40G;
3014                 break;
3015         default:
3016                 if (link->link_status)
3017                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3018                 else
3019                         link->link_speed = ETH_SPEED_NUM_NONE;
3020                 break;
3021         }
3022 }
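     /*
      * Note: the enable_lse argument of i40e_aq_get_link_info() above is
      * also what arms or disarms firmware Link Status Event reporting,
      * which appears to be why the start path calls i40e_dev_link_update()
      * once even when the result itself is not needed.
      */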
3023
3024 int
3025 i40e_dev_link_update(struct rte_eth_dev *dev,
3026                      int wait_to_complete)
3027 {
3028         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3029         struct rte_eth_link link;
3030         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3031         int ret;
3032
3033         memset(&link, 0, sizeof(link));
3034
3035         /* i40e uses full duplex only */
3036         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3037         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3038                         ETH_LINK_SPEED_FIXED);
3039
3040         if (!wait_to_complete && !enable_lse)
3041                 update_link_reg(hw, &link);
3042         else
3043                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3044
3045         if (hw->switch_dev)
3046                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3047
3048         ret = rte_eth_linkstatus_set(dev, &link);
3049         i40e_notify_all_vfs_link_status(dev);
3050
3051         return ret;
3052 }
3053
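     /*
      * Accumulate a 48-bit hardware counter into a 64-bit software value.
      * i40e_stat_update_48() returns at most 48 significant bits, so once
      * offsets are loaded this wrapper detects wrap-around by comparing the
      * low 48 bits of the previous value with the new reading, adds 1 << 48
      * on wrap, then folds back the high 16 bits carried in *prev_stat.
      * Worked example: prev_stat = 0xFFFFFFFFFFFE and a new reading of 0x1
      * implies a wrap, so the result becomes 0x1 + (1ULL << 48).
      */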
3054 static void
3055 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3056                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3057                           uint64_t *stat, uint64_t *prev_stat)
3058 {
3059         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3060         /* extend the 48-bit counter into 64 bits when it overflows */
3061         if (offset_loaded) {
3062                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3063                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3064                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3065         }
3066         *prev_stat = *stat;
3067 }
3068
3069 /* Get all the statistics of a VSI */
3070 void
3071 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3072 {
3073         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3074         struct i40e_eth_stats *nes = &vsi->eth_stats;
3075         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3076         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3077
3078         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3079                                   vsi->offset_loaded, &oes->rx_bytes,
3080                                   &nes->rx_bytes, &vsi->prev_rx_bytes);
3081         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3082                             vsi->offset_loaded, &oes->rx_unicast,
3083                             &nes->rx_unicast);
3084         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3085                             vsi->offset_loaded, &oes->rx_multicast,
3086                             &nes->rx_multicast);
3087         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3088                             vsi->offset_loaded, &oes->rx_broadcast,
3089                             &nes->rx_broadcast);
3090         /* exclude CRC bytes */
3091         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3092                 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3093
3094         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3095                             &oes->rx_discards, &nes->rx_discards);
3096         /* GLV_REPC not supported */
3097         /* GLV_RMPC not supported */
3098         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3099                             &oes->rx_unknown_protocol,
3100                             &nes->rx_unknown_protocol);
3101         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3102                                   vsi->offset_loaded, &oes->tx_bytes,
3103                                   &nes->tx_bytes, &vsi->prev_tx_bytes);
3104         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3105                             vsi->offset_loaded, &oes->tx_unicast,
3106                             &nes->tx_unicast);
3107         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3108                             vsi->offset_loaded, &oes->tx_multicast,
3109                             &nes->tx_multicast);
3110         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3111                             vsi->offset_loaded,  &oes->tx_broadcast,
3112                             &nes->tx_broadcast);
3113         /* GLV_TDPC not supported */
3114         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3115                             &oes->tx_errors, &nes->tx_errors);
3116         vsi->offset_loaded = true;
3117
3118         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3119                     vsi->vsi_id);
3120         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3121         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3122         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3123         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3124         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3125         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3126                     nes->rx_unknown_protocol);
3127         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3128         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3129         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3130         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3131         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3132         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3133         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3134                     vsi->vsi_id);
3135 }
3136
3137 static void
3138 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3139 {
3140         unsigned int i;
3141         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3142         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3143
3144         /* Get rx/tx bytes of internal transfer packets */
3145         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3146                                   I40E_GLV_GORCL(hw->port),
3147                                   pf->offset_loaded,
3148                                   &pf->internal_stats_offset.rx_bytes,
3149                                   &pf->internal_stats.rx_bytes,
3150                                   &pf->internal_prev_rx_bytes);
3151         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3152                                   I40E_GLV_GOTCL(hw->port),
3153                                   pf->offset_loaded,
3154                                   &pf->internal_stats_offset.tx_bytes,
3155                                   &pf->internal_stats.tx_bytes,
3156                                   &pf->internal_prev_tx_bytes);
3157         /* Get total internal rx packet count */
3158         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3159                             I40E_GLV_UPRCL(hw->port),
3160                             pf->offset_loaded,
3161                             &pf->internal_stats_offset.rx_unicast,
3162                             &pf->internal_stats.rx_unicast);
3163         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3164                             I40E_GLV_MPRCL(hw->port),
3165                             pf->offset_loaded,
3166                             &pf->internal_stats_offset.rx_multicast,
3167                             &pf->internal_stats.rx_multicast);
3168         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3169                             I40E_GLV_BPRCL(hw->port),
3170                             pf->offset_loaded,
3171                             &pf->internal_stats_offset.rx_broadcast,
3172                             &pf->internal_stats.rx_broadcast);
3173         /* Get total internal tx packet count */
3174         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3175                             I40E_GLV_UPTCL(hw->port),
3176                             pf->offset_loaded,
3177                             &pf->internal_stats_offset.tx_unicast,
3178                             &pf->internal_stats.tx_unicast);
3179         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3180                             I40E_GLV_MPTCL(hw->port),
3181                             pf->offset_loaded,
3182                             &pf->internal_stats_offset.tx_multicast,
3183                             &pf->internal_stats.tx_multicast);
3184         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3185                             I40E_GLV_BPTCL(hw->port),
3186                             pf->offset_loaded,
3187                             &pf->internal_stats_offset.tx_broadcast,
3188                             &pf->internal_stats.tx_broadcast);
3189
3190         /* exclude CRC size */
3191         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3192                 pf->internal_stats.rx_multicast +
3193                 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3194
3195         /* Get statistics of struct i40e_eth_stats */
3196         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3197                                   I40E_GLPRT_GORCL(hw->port),
3198                                   pf->offset_loaded, &os->eth.rx_bytes,
3199                                   &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3200         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3201                             I40E_GLPRT_UPRCL(hw->port),
3202                             pf->offset_loaded, &os->eth.rx_unicast,
3203                             &ns->eth.rx_unicast);
3204         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3205                             I40E_GLPRT_MPRCL(hw->port),
3206                             pf->offset_loaded, &os->eth.rx_multicast,
3207                             &ns->eth.rx_multicast);
3208         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3209                             I40E_GLPRT_BPRCL(hw->port),
3210                             pf->offset_loaded, &os->eth.rx_broadcast,
3211                             &ns->eth.rx_broadcast);
3212         /* Workaround: CRC size should not be included in byte statistics,
3213          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3214          * packet.
3215          */
3216         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3217                 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3218
3219         /* exclude internal rx bytes
3220          * Workaround: I40E_GLV_GORC[H/L] may be updated before
3221          * I40E_GLPRT_GORC[H/L], so there is a small window that can cause a
3222          * negative value. The same applies to I40E_GLV_UPRC[H/L],
3223          * I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
3224          */
3225         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3226                 ns->eth.rx_bytes = 0;
3227         else
3228                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3229
3230         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3231                 ns->eth.rx_unicast = 0;
3232         else
3233                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3234
3235         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3236                 ns->eth.rx_multicast = 0;
3237         else
3238                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3239
3240         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3241                 ns->eth.rx_broadcast = 0;
3242         else
3243                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3244
3245         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3246                             pf->offset_loaded, &os->eth.rx_discards,
3247                             &ns->eth.rx_discards);
3248         /* GLPRT_REPC not supported */
3249         /* GLPRT_RMPC not supported */
3250         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3251                             pf->offset_loaded,
3252                             &os->eth.rx_unknown_protocol,
3253                             &ns->eth.rx_unknown_protocol);
3254         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3255                                   I40E_GLPRT_GOTCL(hw->port),
3256                                   pf->offset_loaded, &os->eth.tx_bytes,
3257                                   &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3258         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3259                             I40E_GLPRT_UPTCL(hw->port),
3260                             pf->offset_loaded, &os->eth.tx_unicast,
3261                             &ns->eth.tx_unicast);
3262         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3263                             I40E_GLPRT_MPTCL(hw->port),
3264                             pf->offset_loaded, &os->eth.tx_multicast,
3265                             &ns->eth.tx_multicast);
3266         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3267                             I40E_GLPRT_BPTCL(hw->port),
3268                             pf->offset_loaded, &os->eth.tx_broadcast,
3269                             &ns->eth.tx_broadcast);
3270         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3271                 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3272
3273         /* exclude internal tx bytes
3274          * Workaround: I40E_GLV_GOTC[H/L] may be updated before
3275          * I40E_GLPRT_GOTC[H/L], so there is a small window that can cause a
3276          * negative value. The same applies to I40E_GLV_UPTC[H/L],
3277          * I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
3278          */
3279         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3280                 ns->eth.tx_bytes = 0;
3281         else
3282                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3283
3284         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3285                 ns->eth.tx_unicast = 0;
3286         else
3287                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3288
3289         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3290                 ns->eth.tx_multicast = 0;
3291         else
3292                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3293
3294         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3295                 ns->eth.tx_broadcast = 0;
3296         else
3297                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3298
3299         /* GLPRT_TEPC not supported */
3300
3301         /* additional port specific stats */
3302         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3303                             pf->offset_loaded, &os->tx_dropped_link_down,
3304                             &ns->tx_dropped_link_down);
3305         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3306                             pf->offset_loaded, &os->crc_errors,
3307                             &ns->crc_errors);
3308         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3309                             pf->offset_loaded, &os->illegal_bytes,
3310                             &ns->illegal_bytes);
3311         /* GLPRT_ERRBC not supported */
3312         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3313                             pf->offset_loaded, &os->mac_local_faults,
3314                             &ns->mac_local_faults);
3315         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3316                             pf->offset_loaded, &os->mac_remote_faults,
3317                             &ns->mac_remote_faults);
3318         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3319                             pf->offset_loaded, &os->rx_length_errors,
3320                             &ns->rx_length_errors);
3321         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3322                             pf->offset_loaded, &os->link_xon_rx,
3323                             &ns->link_xon_rx);
3324         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3325                             pf->offset_loaded, &os->link_xoff_rx,
3326                             &ns->link_xoff_rx);
3327         for (i = 0; i < 8; i++) {
3328                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3329                                     pf->offset_loaded,
3330                                     &os->priority_xon_rx[i],
3331                                     &ns->priority_xon_rx[i]);
3332                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3333                                     pf->offset_loaded,
3334                                     &os->priority_xoff_rx[i],
3335                                     &ns->priority_xoff_rx[i]);
3336         }
3337         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3338                             pf->offset_loaded, &os->link_xon_tx,
3339                             &ns->link_xon_tx);
3340         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3341                             pf->offset_loaded, &os->link_xoff_tx,
3342                             &ns->link_xoff_tx);
3343         for (i = 0; i < 8; i++) {
3344                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3345                                     pf->offset_loaded,
3346                                     &os->priority_xon_tx[i],
3347                                     &ns->priority_xon_tx[i]);
3348                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3349                                     pf->offset_loaded,
3350                                     &os->priority_xoff_tx[i],
3351                                     &ns->priority_xoff_tx[i]);
3352                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3353                                     pf->offset_loaded,
3354                                     &os->priority_xon_2_xoff[i],
3355                                     &ns->priority_xon_2_xoff[i]);
3356         }
3357         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3358                             I40E_GLPRT_PRC64L(hw->port),
3359                             pf->offset_loaded, &os->rx_size_64,
3360                             &ns->rx_size_64);
3361         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3362                             I40E_GLPRT_PRC127L(hw->port),
3363                             pf->offset_loaded, &os->rx_size_127,
3364                             &ns->rx_size_127);
3365         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3366                             I40E_GLPRT_PRC255L(hw->port),
3367                             pf->offset_loaded, &os->rx_size_255,
3368                             &ns->rx_size_255);
3369         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3370                             I40E_GLPRT_PRC511L(hw->port),
3371                             pf->offset_loaded, &os->rx_size_511,
3372                             &ns->rx_size_511);
3373         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3374                             I40E_GLPRT_PRC1023L(hw->port),
3375                             pf->offset_loaded, &os->rx_size_1023,
3376                             &ns->rx_size_1023);
3377         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3378                             I40E_GLPRT_PRC1522L(hw->port),
3379                             pf->offset_loaded, &os->rx_size_1522,
3380                             &ns->rx_size_1522);
3381         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3382                             I40E_GLPRT_PRC9522L(hw->port),
3383                             pf->offset_loaded, &os->rx_size_big,
3384                             &ns->rx_size_big);
3385         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3386                             pf->offset_loaded, &os->rx_undersize,
3387                             &ns->rx_undersize);
3388         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3389                             pf->offset_loaded, &os->rx_fragments,
3390                             &ns->rx_fragments);
3391         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3392                             pf->offset_loaded, &os->rx_oversize,
3393                             &ns->rx_oversize);
3394         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3395                             pf->offset_loaded, &os->rx_jabber,
3396                             &ns->rx_jabber);
3397         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3398                             I40E_GLPRT_PTC64L(hw->port),
3399                             pf->offset_loaded, &os->tx_size_64,
3400                             &ns->tx_size_64);
3401         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3402                             I40E_GLPRT_PTC127L(hw->port),
3403                             pf->offset_loaded, &os->tx_size_127,
3404                             &ns->tx_size_127);
3405         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3406                             I40E_GLPRT_PTC255L(hw->port),
3407                             pf->offset_loaded, &os->tx_size_255,
3408                             &ns->tx_size_255);
3409         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3410                             I40E_GLPRT_PTC511L(hw->port),
3411                             pf->offset_loaded, &os->tx_size_511,
3412                             &ns->tx_size_511);
3413         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3414                             I40E_GLPRT_PTC1023L(hw->port),
3415                             pf->offset_loaded, &os->tx_size_1023,
3416                             &ns->tx_size_1023);
3417         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3418                             I40E_GLPRT_PTC1522L(hw->port),
3419                             pf->offset_loaded, &os->tx_size_1522,
3420                             &ns->tx_size_1522);
3421         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3422                             I40E_GLPRT_PTC9522L(hw->port),
3423                             pf->offset_loaded, &os->tx_size_big,
3424                             &ns->tx_size_big);
3425         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3426                            pf->offset_loaded,
3427                            &os->fd_sb_match, &ns->fd_sb_match);
3428         /* GLPRT_MSPDC not supported */
3429         /* GLPRT_XEC not supported */
3430
3431         pf->offset_loaded = true;
3432
3433         if (pf->main_vsi)
3434                 i40e_update_vsi_stats(pf->main_vsi);
3435 }
3436
3437 /* Get all statistics of a port */
3438 static int
3439 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3440 {
3441         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3442         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3443         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3444         struct i40e_vsi *vsi;
3445         unsigned i;
3446
3447         /* read the hardware registers to refresh the cached stats structs */
3448         i40e_read_stats_registers(pf, hw);
3449
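             /*
              * Rx totals (ipackets/ibytes) are taken from the main VSI
              * counters while Tx totals come from the port-level counters;
              * VF VSI traffic and errors are folded in by the loop below.
              */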
3450         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3451                         pf->main_vsi->eth_stats.rx_multicast +
3452                         pf->main_vsi->eth_stats.rx_broadcast -
3453                         pf->main_vsi->eth_stats.rx_discards;
3454         stats->opackets = ns->eth.tx_unicast +
3455                         ns->eth.tx_multicast +
3456                         ns->eth.tx_broadcast;
3457         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3458         stats->obytes   = ns->eth.tx_bytes;
3459         stats->oerrors  = ns->eth.tx_errors +
3460                         pf->main_vsi->eth_stats.tx_errors;
3461
3462         /* Rx Errors */
3463         stats->imissed  = ns->eth.rx_discards +
3464                         pf->main_vsi->eth_stats.rx_discards;
3465         stats->ierrors  = ns->crc_errors +
3466                         ns->rx_length_errors + ns->rx_undersize +
3467                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3468
3469         if (pf->vfs) {
3470                 for (i = 0; i < pf->vf_num; i++) {
3471                         vsi = pf->vfs[i].vsi;
3472                         i40e_update_vsi_stats(vsi);
3473
3474                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3475                                         vsi->eth_stats.rx_multicast +
3476                                         vsi->eth_stats.rx_broadcast -
3477                                         vsi->eth_stats.rx_discards);
3478                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3479                         stats->oerrors  += vsi->eth_stats.tx_errors;
3480                         stats->imissed  += vsi->eth_stats.rx_discards;
3481                 }
3482         }
3483
3484         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3485         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3486         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3487         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3488         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3489         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3490         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3491                     ns->eth.rx_unknown_protocol);
3492         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3493         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3494         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3495         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3496         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3497         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3498
3499         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3500                     ns->tx_dropped_link_down);
3501         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3502         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3503                     ns->illegal_bytes);
3504         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3505         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3506                     ns->mac_local_faults);
3507         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3508                     ns->mac_remote_faults);
3509         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3510                     ns->rx_length_errors);
3511         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3512         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3513         for (i = 0; i < 8; i++) {
3514                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3515                                 i, ns->priority_xon_rx[i]);
3516                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3517                                 i, ns->priority_xoff_rx[i]);
3518         }
3519         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3520         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3521         for (i = 0; i < 8; i++) {
3522                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3523                                 i, ns->priority_xon_tx[i]);
3524                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3525                                 i, ns->priority_xoff_tx[i]);
3526                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3527                                 i, ns->priority_xon_2_xoff[i]);
3528         }
3529         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3530         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3531         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3532         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3533         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3534         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3535         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3536         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3537         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3538         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3539         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3540         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3541         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3542         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3543         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3544         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3545         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3546         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3547         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3548                         ns->mac_short_packet_dropped);
3549         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3550                     ns->checksum_error);
3551         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3552         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3553         return 0;
3554 }
3555
3556 /* Reset the statistics */
3557 static int
3558 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3559 {
3560         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3561         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3562
3563         /* Mark PF and VSI stats so the next read re-bases the offsets ("reset") */
3564         pf->offset_loaded = false;
3565         if (pf->main_vsi)
3566                 pf->main_vsi->offset_loaded = false;
3567
3568         /* read the stats; current register values become the new offsets */
3569         i40e_read_stats_registers(pf, hw);
3570
3571         return 0;
3572 }
3573
3574 static uint32_t
3575 i40e_xstats_calc_num(void)
3576 {
3577         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3578                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3579                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3580 }
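     /*
      * The xstats set is fixed: the basic eth stats, the HW port stats, and
      * each per-priority stat replicated across the 8 traffic classes. The
      * names and values callbacks below must walk these tables in the same
      * order so that xstat ids line up between the two.
      */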
3581
3582 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3583                                      struct rte_eth_xstat_name *xstats_names,
3584                                      __rte_unused unsigned limit)
3585 {
3586         unsigned count = 0;
3587         unsigned i, prio;
3588
3589         if (xstats_names == NULL)
3590                 return i40e_xstats_calc_num();
3591
3592         /* Note: limit checked in rte_eth_xstats_get_names() */
3593
3594         /* Get stats from i40e_eth_stats struct */
3595         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3596                 strlcpy(xstats_names[count].name,
3597                         rte_i40e_stats_strings[i].name,
3598                         sizeof(xstats_names[count].name));
3599                 count++;
3600         }
3601
3602         /* Get individual stats from i40e_hw_port struct */
3603         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3604                 strlcpy(xstats_names[count].name,
3605                         rte_i40e_hw_port_strings[i].name,
3606                         sizeof(xstats_names[count].name));
3607                 count++;
3608         }
3609
3610         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3611                 for (prio = 0; prio < 8; prio++) {
3612                         snprintf(xstats_names[count].name,
3613                                  sizeof(xstats_names[count].name),
3614                                  "rx_priority%u_%s", prio,
3615                                  rte_i40e_rxq_prio_strings[i].name);
3616                         count++;
3617                 }
3618         }
3619
3620         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3621                 for (prio = 0; prio < 8; prio++) {
3622                         snprintf(xstats_names[count].name,
3623                                  sizeof(xstats_names[count].name),
3624                                  "tx_priority%u_%s", prio,
3625                                  rte_i40e_txq_prio_strings[i].name);
3626                         count++;
3627                 }
3628         }
3629         return count;
3630 }
3631
3632 static int
3633 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3634                     unsigned n)
3635 {
3636         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3637         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3638         unsigned i, count, prio;
3639         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3640
3641         count = i40e_xstats_calc_num();
3642         if (n < count)
3643                 return count;
3644
3645         i40e_read_stats_registers(pf, hw);
3646
3647         if (xstats == NULL)
3648                 return 0;
3649
3650         count = 0;
3651
3652         /* Get stats from i40e_eth_stats struct */
3653         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3654                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3655                         rte_i40e_stats_strings[i].offset);
3656                 xstats[count].id = count;
3657                 count++;
3658         }
3659
3660         /* Get individual stats from i40e_hw_port struct */
3661         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3662                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3663                         rte_i40e_hw_port_strings[i].offset);
3664                 xstats[count].id = count;
3665                 count++;
3666         }
3667
3668         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3669                 for (prio = 0; prio < 8; prio++) {
3670                         xstats[count].value =
3671                                 *(uint64_t *)(((char *)hw_stats) +
3672                                 rte_i40e_rxq_prio_strings[i].offset +
3673                                 (sizeof(uint64_t) * prio));
3674                         xstats[count].id = count;
3675                         count++;
3676                 }
3677         }
3678
3679         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3680                 for (prio = 0; prio < 8; prio++) {
3681                         xstats[count].value =
3682                                 *(uint64_t *)(((char *)hw_stats) +
3683                                 rte_i40e_txq_prio_strings[i].offset +
3684                                 (sizeof(uint64_t) * prio));
3685                         xstats[count].id = count;
3686                         count++;
3687                 }
3688         }
3689
3690         return count;
3691 }
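     /*
      * Typical retrieval from an application (sketch using the generic
      * ethdev xstats API):
      *
      *    int n = rte_eth_xstats_get(port_id, NULL, 0);  // returns count
      *    struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
      *    struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
      *    rte_eth_xstats_get_names(port_id, names, n);
      *    rte_eth_xstats_get(port_id, xs, n);  // xs[i].id indexes names[]
      */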
3692
3693 static int
3694 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3695 {
3696         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3697         u32 full_ver;
3698         u8 ver, patch;
3699         u16 build;
3700         int ret;
3701
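             /*
              * hw->nvm.oem_ver packs the OEM version; per the shifts below:
              * bits 31:24 = version, bits 23:8 = build, bits 7:0 = patch.
              */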
3702         full_ver = hw->nvm.oem_ver;
3703         ver = (u8)(full_ver >> 24);
3704         build = (u16)((full_ver >> 8) & 0xffff);
3705         patch = (u8)(full_ver & 0xff);
3706
3707         ret = snprintf(fw_version, fw_size,
3708                  "%d.%d%d 0x%08x %d.%d.%d",
3709                  ((hw->nvm.version >> 12) & 0xf),
3710                  ((hw->nvm.version >> 4) & 0xff),
3711                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3712                  ver, build, patch);
3713
3714         ret += 1; /* add the size of '\0' */
3715         if (fw_size < (u32)ret)
3716                 return ret;
3717         else
3718                 return 0;
3719 }
3720
3721 /*
3722  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3723  * the Rx data path does not hang if the FW LLDP agent is stopped.
3724  * Return true if LLDP needs to be stopped; return false if LLDP cannot be
3725  * disabled without risking an Rx data path block.
3726  */
3727 static bool
3728 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3729 {
3730         double nvm_ver;
3731         char ver_str[64] = {0};
3732         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3733
3734         i40e_fw_version_get(dev, ver_str, 64);
3735         nvm_ver = atof(ver_str);
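             /*
              * Compare versions as integers scaled by 1000 (e.g. 6.01 ->
              * 6010) rather than comparing the parsed doubles directly,
              * avoiding floating-point precision pitfalls.
              */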
3736         if ((hw->mac.type == I40E_MAC_X722 ||
3737              hw->mac.type == I40E_MAC_X722_VF) &&
3738              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3739                 return true;
3740         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3741                 return true;
3742
3743         return false;
3744 }
3745
3746 static int
3747 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3748 {
3749         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3750         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3751         struct i40e_vsi *vsi = pf->main_vsi;
3752         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3753
3754         dev_info->max_rx_queues = vsi->nb_qps;
3755         dev_info->max_tx_queues = vsi->nb_qps;
3756         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3757         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3758         dev_info->max_mac_addrs = vsi->max_macaddrs;
3759         dev_info->max_vfs = pci_dev->max_vfs;
3760         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3761         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3762         dev_info->rx_queue_offload_capa = 0;
3763         dev_info->rx_offload_capa =
3764                 DEV_RX_OFFLOAD_VLAN_STRIP |
3765                 DEV_RX_OFFLOAD_QINQ_STRIP |
3766                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3767                 DEV_RX_OFFLOAD_UDP_CKSUM |
3768                 DEV_RX_OFFLOAD_TCP_CKSUM |
3769                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3770                 DEV_RX_OFFLOAD_KEEP_CRC |
3771                 DEV_RX_OFFLOAD_SCATTER |
3772                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3773                 DEV_RX_OFFLOAD_VLAN_FILTER |
3774                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3775                 DEV_RX_OFFLOAD_RSS_HASH;
3776
3777         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3778         dev_info->tx_offload_capa =
3779                 DEV_TX_OFFLOAD_VLAN_INSERT |
3780                 DEV_TX_OFFLOAD_QINQ_INSERT |
3781                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3782                 DEV_TX_OFFLOAD_UDP_CKSUM |
3783                 DEV_TX_OFFLOAD_TCP_CKSUM |
3784                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3785                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3786                 DEV_TX_OFFLOAD_TCP_TSO |
3787                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3788                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3789                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3790                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3791                 DEV_TX_OFFLOAD_MULTI_SEGS |
3792                 dev_info->tx_queue_offload_capa;
3793         dev_info->dev_capa =
3794                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3795                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3796
3797         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3798                                                 sizeof(uint32_t);
3799         dev_info->reta_size = pf->hash_lut_size;
3800         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3801
3802         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3803                 .rx_thresh = {
3804                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3805                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3806                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3807                 },
3808                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3809                 .rx_drop_en = 0,
3810                 .offloads = 0,
3811         };
3812
3813         dev_info->default_txconf = (struct rte_eth_txconf) {
3814                 .tx_thresh = {
3815                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3816                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3817                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3818                 },
3819                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3820                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3821                 .offloads = 0,
3822         };
3823
3824         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3825                 .nb_max = I40E_MAX_RING_DESC,
3826                 .nb_min = I40E_MIN_RING_DESC,
3827                 .nb_align = I40E_ALIGN_RING_DESC,
3828         };
3829
3830         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3831                 .nb_max = I40E_MAX_RING_DESC,
3832                 .nb_min = I40E_MIN_RING_DESC,
3833                 .nb_align = I40E_ALIGN_RING_DESC,
3834                 .nb_seg_max = I40E_TX_MAX_SEG,
3835                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3836         };
3837
3838         if (pf->flags & I40E_FLAG_VMDQ) {
3839                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3840                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3841                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3842                                                 pf->max_nb_vmdq_vsi;
3843                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3844                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3845                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3846         }
3847
3848         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3849                 /* For XL710 */
3850                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3851                 dev_info->default_rxportconf.nb_queues = 2;
3852                 dev_info->default_txportconf.nb_queues = 2;
3853                 if (dev->data->nb_rx_queues == 1)
3854                         dev_info->default_rxportconf.ring_size = 2048;
3855                 else
3856                         dev_info->default_rxportconf.ring_size = 1024;
3857                 if (dev->data->nb_tx_queues == 1)
3858                         dev_info->default_txportconf.ring_size = 1024;
3859                 else
3860                         dev_info->default_txportconf.ring_size = 512;
3861
3862         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3863                 /* For XXV710 */
3864                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3865                 dev_info->default_rxportconf.nb_queues = 1;
3866                 dev_info->default_txportconf.nb_queues = 1;
3867                 dev_info->default_rxportconf.ring_size = 256;
3868                 dev_info->default_txportconf.ring_size = 256;
3869         } else {
3870                 /* For X710 */
3871                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3872                 dev_info->default_rxportconf.nb_queues = 1;
3873                 dev_info->default_txportconf.nb_queues = 1;
3874                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3875                         dev_info->default_rxportconf.ring_size = 512;
3876                         dev_info->default_txportconf.ring_size = 256;
3877                 } else {
3878                         dev_info->default_rxportconf.ring_size = 256;
3879                         dev_info->default_txportconf.ring_size = 256;
3880                 }
3881         }
3882         dev_info->default_rxportconf.burst_size = 32;
3883         dev_info->default_txportconf.burst_size = 32;
3884
3885         return 0;
3886 }
3887
3888 static int
3889 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3890 {
3891         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3892         struct i40e_vsi *vsi = pf->main_vsi;
3893         PMD_INIT_FUNC_TRACE();
3894
3895         if (on)
3896                 return i40e_vsi_add_vlan(vsi, vlan_id);
3897         else
3898                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3899 }
3900
3901 static int
3902 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3903                                 enum rte_vlan_type vlan_type,
3904                                 uint16_t tpid, int qinq)
3905 {
3906         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3907         uint64_t reg_r = 0;
3908         uint64_t reg_w = 0;
3909         uint16_t reg_id = 3;
3910         int ret;
3911
3912         if (qinq) {
3913                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3914                         reg_id = 2;
3915         }
3916
3917         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3918                                           &reg_r, NULL);
3919         if (ret != I40E_SUCCESS) {
3920                 PMD_DRV_LOG(ERR,
3921                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3922                            reg_id);
3923                 return -EIO;
3924         }
3925         PMD_DRV_LOG(DEBUG,
3926                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3927                     reg_id, reg_r);
3928
3929         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3930         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3931         if (reg_r == reg_w) {
3932                 PMD_DRV_LOG(DEBUG, "No need to write");
3933                 return 0;
3934         }
3935
3936         ret = i40e_aq_debug_write_global_register(hw,
3937                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3938                                            reg_w, NULL);
3939         if (ret != I40E_SUCCESS) {
3940                 PMD_DRV_LOG(ERR,
3941                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3942                             reg_id);
3943                 return -EIO;
3944         }
3945         PMD_DRV_LOG(DEBUG,
3946                     "Global register 0x%08x is changed with value 0x%08x",
3947                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3948
3949         return 0;
3950 }
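
/*
 * Note (editorial): in the register-based path above, QinQ maps the
 * outer TPID to I40E_GL_SWT_L2TAGCTRL[2] and the inner TPID to
 * L2TAGCTRL[3]; in single-VLAN mode only L2TAGCTRL[3] is used, which is
 * why reg_id defaults to 3.
 */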
3951
3952 static int
3953 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3954                    enum rte_vlan_type vlan_type,
3955                    uint16_t tpid)
3956 {
3957         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3958         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3959         int qinq = dev->data->dev_conf.rxmode.offloads &
3960                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3961         int ret = 0;
3962
3963         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3964              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3965             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3966                 PMD_DRV_LOG(ERR,
3967                             "Unsupported vlan type.");
3968                 return -EINVAL;
3969         }
3970
3971         if (pf->support_multi_driver) {
3972                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3973                 return -ENOTSUP;
3974         }
3975
3976         /* Support for 802.1ad frames was added in NVM API 1.7 */
3977         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3978                 if (qinq) {
3979                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3980                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3981                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3982                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3983                 } else {
3984                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3985                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3986                 }
3987                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3988                 if (ret != I40E_SUCCESS) {
3989                         PMD_DRV_LOG(ERR,
3990                                     "Set switch config failed aq_err: %d",
3991                                     hw->aq.asq_last_status);
3992                         ret = -EIO;
3993                 }
3994         } else
3995                 /* If NVM API < 1.7, keep the register setting */
3996                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3997                                                       tpid, qinq);
3998
3999         return ret;
4000 }
4001
4002 /* Configure outer vlan stripping on or off in QinQ mode */
4003 static int
4004 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
4005 {
4006         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4007         int ret = I40E_SUCCESS;
4008         uint32_t reg;
4009
4010         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
4011                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
4012                 return -EINVAL;
4013         }
4014
4015         /* Configure for outer VLAN RX stripping */
4016         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4017
4018         if (on)
4019                 reg |= I40E_VSI_TSR_QINQ_STRIP;
4020         else
4021                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4022
4023         ret = i40e_aq_debug_write_register(hw,
4024                                                    I40E_VSI_TSR(vsi->vsi_id),
4025                                                    reg, NULL);
4026         if (ret < 0) {
4027                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4028                                     vsi->vsi_id);
4029                 return I40E_ERR_CONFIG;
4030         }
4031
4032         return ret;
4033 }
4034
4035 static int
4036 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4037 {
4038         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4039         struct i40e_vsi *vsi = pf->main_vsi;
4040         struct rte_eth_rxmode *rxmode;
4041
4042         rxmode = &dev->data->dev_conf.rxmode;
4043         if (mask & ETH_VLAN_FILTER_MASK) {
4044                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4045                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4046                 else
4047                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4048         }
4049
4050         if (mask & ETH_VLAN_STRIP_MASK) {
4051                 /* Enable or disable VLAN stripping */
4052                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4053                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4054                 else
4055                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4056         }
4057
4058         if (mask & ETH_VLAN_EXTEND_MASK) {
4059                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4060                         i40e_vsi_config_double_vlan(vsi, TRUE);
4061                         /* Set global registers with default ethertype. */
4062                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4063                                            RTE_ETHER_TYPE_VLAN);
4064                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4065                                            RTE_ETHER_TYPE_VLAN);
4066                 }
4067                 else
4068                         i40e_vsi_config_double_vlan(vsi, FALSE);
4069         }
4070
4071         if (mask & ETH_QINQ_STRIP_MASK) {
4072                 /* Enable or disable outer VLAN stripping */
4073                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4074                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4075                 else
4076                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4077         }
4078
4079         return 0;
4080 }
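
/*
 * Usage sketch (editorial, illustrative only): applications normally
 * reach the handler above through rte_eth_dev_set_vlan_offload(), which
 * updates dev_conf.rxmode.offloads and passes the mask of settings that
 * changed, e.g. ETH_VLAN_STRIP_MASK when only stripping is toggled.
 */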
4081
4082 static void
4083 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4084                           __rte_unused uint16_t queue,
4085                           __rte_unused int on)
4086 {
4087         PMD_INIT_FUNC_TRACE();
4088 }
4089
4090 static int
4091 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4092 {
4093         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4094         struct i40e_vsi *vsi = pf->main_vsi;
4095         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4096         struct i40e_vsi_vlan_pvid_info info;
4097
4098         memset(&info, 0, sizeof(info));
4099         info.on = on;
4100         if (info.on)
4101                 info.config.pvid = pvid;
4102         else {
4103                 info.config.reject.tagged =
4104                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4105                 info.config.reject.untagged =
4106                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4107         }
4108
4109         return i40e_vsi_vlan_pvid_set(vsi, &info);
4110 }
4111
4112 static int
4113 i40e_dev_led_on(struct rte_eth_dev *dev)
4114 {
4115         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4116         uint32_t mode = i40e_led_get(hw);
4117
4118         if (mode == 0)
4119                 i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
4120
4121         return 0;
4122 }
4123
4124 static int
4125 i40e_dev_led_off(struct rte_eth_dev *dev)
4126 {
4127         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4128         uint32_t mode = i40e_led_get(hw);
4129
4130         if (mode != 0)
4131                 i40e_led_set(hw, 0, false);
4132
4133         return 0;
4134 }
4135
4136 static int
4137 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4138 {
4139         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4140         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4141
4142         fc_conf->pause_time = pf->fc_conf.pause_time;
4143
4144         /* Read from the registers, in case they were modified by another port */
4145         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4146                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4147         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4148                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4149
4150         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4151         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4152
4153         /* Return the current mode according to the actual setting */
4154         switch (hw->fc.current_mode) {
4155         case I40E_FC_FULL:
4156                 fc_conf->mode = RTE_FC_FULL;
4157                 break;
4158         case I40E_FC_TX_PAUSE:
4159                 fc_conf->mode = RTE_FC_TX_PAUSE;
4160                 break;
4161         case I40E_FC_RX_PAUSE:
4162                 fc_conf->mode = RTE_FC_RX_PAUSE;
4163                 break;
4164         case I40E_FC_NONE:
4165         default:
4166                 fc_conf->mode = RTE_FC_NONE;
4167         }
4168
4169         return 0;
4170 }
4171
4172 static int
4173 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4174 {
4175         uint32_t mflcn_reg, fctrl_reg, reg;
4176         uint32_t max_high_water;
4177         uint8_t i, aq_failure;
4178         int err;
4179         struct i40e_hw *hw;
4180         struct i40e_pf *pf;
4181         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4182                 [RTE_FC_NONE] = I40E_FC_NONE,
4183                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4184                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4185                 [RTE_FC_FULL] = I40E_FC_FULL
4186         };
4187
4188         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
4189
4190         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4191         if ((fc_conf->high_water > max_high_water) ||
4192                         (fc_conf->high_water < fc_conf->low_water)) {
4193                 PMD_INIT_LOG(ERR,
4194                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
4195                         max_high_water);
4196                 return -EINVAL;
4197         }
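        /*
         * Worked example (editorial): I40E_RXPBSIZE is 968 KiB, so
         * max_high_water = (968 * 1024) >> I40E_KILOSHIFT = 968 KB;
         * e.g. high_water = 576 and low_water = 512 (both in KB) would
         * pass the check above.
         */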
4198
4199         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4200         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4201         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4202
4203         pf->fc_conf.pause_time = fc_conf->pause_time;
4204         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4205         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4206
4207         PMD_INIT_FUNC_TRACE();
4208
4209         /* All the link flow control related enable/disable register
4210          * configuration is handled by the F/W
4211          */
4212         err = i40e_set_fc(hw, &aq_failure, true);
4213         if (err < 0)
4214                 return -ENOSYS;
4215
4216         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4217                 /* Configure flow control refresh threshold,
4218                  * the value for stat_tx_pause_refresh_timer[8]
4219                  * is used for global pause operation.
4220                  */
4221
4222                 I40E_WRITE_REG(hw,
4223                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4224                                pf->fc_conf.pause_time);
4225
4226                 /* configure the timer value included in transmitted pause
4227                  * frame,
4228                  * the value for stat_tx_pause_quanta[8] is used for global
4229                  * pause operation
4230                  */
4231                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4232                                pf->fc_conf.pause_time);
4233
4234                 fctrl_reg = I40E_READ_REG(hw,
4235                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4236
4237                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4238                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4239                 else
4240                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4241
4242                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4243                                fctrl_reg);
4244         } else {
4245                 /* Configure pause time (2 TCs per register) */
4246                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4247                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4248                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4249
4250                 /* Configure flow control refresh threshold value */
4251                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4252                                pf->fc_conf.pause_time / 2);
4253
4254                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4255
4256                 /* Set or clear the MFLCN.PMCF & MFLCN.DPF bits
4257                  * depending on the configuration
4258                  */
4259                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4260                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4261                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4262                 } else {
4263                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4264                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4265                 }
4266
4267                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4268         }
4269
4270         if (!pf->support_multi_driver) {
4271                 /* Configure watermarks based on both packets and bytes */
4272                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4273                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4274                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4275                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4276                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4277                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4278                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4279                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4280                                   << I40E_KILOSHIFT);
4281                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4282                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4283                                    << I40E_KILOSHIFT);
4284         } else {
4285                 PMD_DRV_LOG(ERR,
4286                             "Watermark configuration is not supported.");
4287         }
4288
4289         I40E_WRITE_FLUSH(hw);
4290
4291         return 0;
4292 }
4293
4294 static int
4295 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4296                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4297 {
4298         PMD_INIT_FUNC_TRACE();
4299
4300         return -ENOSYS;
4301 }
4302
4303 /* Add a MAC address, and update filters */
4304 static int
4305 i40e_macaddr_add(struct rte_eth_dev *dev,
4306                  struct rte_ether_addr *mac_addr,
4307                  __rte_unused uint32_t index,
4308                  uint32_t pool)
4309 {
4310         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4311         struct i40e_mac_filter_info mac_filter;
4312         struct i40e_vsi *vsi;
4313         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4314         int ret;
4315
4316         /* If VMDQ not enabled or configured, return */
4317         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4318                           !pf->nb_cfg_vmdq_vsi)) {
4319                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4320                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4321                         pool);
4322                 return -ENOTSUP;
4323         }
4324
4325         if (pool > pf->nb_cfg_vmdq_vsi) {
4326                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4327                                 pool, pf->nb_cfg_vmdq_vsi);
4328                 return -EINVAL;
4329         }
4330
4331         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4332         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4333                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4334         else
4335                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
4336
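        /* Pool 0 addresses the main VSI; pool N (N >= 1) maps to the
         * (N - 1)-th VMDq VSI.
         */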
4337         if (pool == 0)
4338                 vsi = pf->main_vsi;
4339         else
4340                 vsi = pf->vmdq[pool - 1].vsi;
4341
4342         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4343         if (ret != I40E_SUCCESS) {
4344                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4345                 return -ENODEV;
4346         }
4347         return 0;
4348 }
4349
4350 /* Remove a MAC address, and update filters */
4351 static void
4352 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4353 {
4354         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4355         struct i40e_vsi *vsi;
4356         struct rte_eth_dev_data *data = dev->data;
4357         struct rte_ether_addr *macaddr;
4358         int ret;
4359         uint32_t i;
4360         uint64_t pool_sel;
4361
4362         macaddr = &(data->mac_addrs[index]);
4363
4364         pool_sel = dev->data->mac_pool_sel[index];
4365
4366         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4367                 if (pool_sel & (1ULL << i)) {
4368                         if (i == 0)
4369                                 vsi = pf->main_vsi;
4370                         else {
4371                                 /* No VMDQ pool enabled or configured */
4372                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4373                                         (i > pf->nb_cfg_vmdq_vsi)) {
4374                                         PMD_DRV_LOG(ERR,
4375                                                 "No VMDQ pool enabled/configured");
4376                                         return;
4377                                 }
4378                                 vsi = pf->vmdq[i - 1].vsi;
4379                         }
4380                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4381
4382                         if (ret) {
4383                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4384                                 return;
4385                         }
4386                 }
4387         }
4388 }
4389
4390 /* Set perfect match or hash match of MAC and VLAN for a VF */
4391 static int
4392 i40e_vf_mac_filter_set(struct i40e_pf *pf,
4393                  struct rte_eth_mac_filter *filter,
4394                  bool add)
4395 {
4396         struct i40e_hw *hw;
4397         struct i40e_mac_filter_info mac_filter;
4398         struct rte_ether_addr old_mac;
4399         struct rte_ether_addr *new_mac;
4400         struct i40e_pf_vf *vf = NULL;
4401         uint16_t vf_id;
4402         int ret;
4403
4404         if (pf == NULL) {
4405                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
4406                 return -EINVAL;
4407         }
4408         hw = I40E_PF_TO_HW(pf);
4409
4410         if (filter == NULL) {
4411                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
4412                 return -EINVAL;
4413         }
4414
4415         new_mac = &filter->mac_addr;
4416
4417         if (rte_is_zero_ether_addr(new_mac)) {
4418                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
4419                 return -EINVAL;
4420         }
4421
4422         vf_id = filter->dst_id;
4423
4424         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
4425                 PMD_DRV_LOG(ERR, "Invalid argument.");
4426                 return -EINVAL;
4427         }
4428         vf = &pf->vfs[vf_id];
4429
4430         if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) {
4431                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
4432                 return -EINVAL;
4433         }
4434
4435         if (add) {
4436                 rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
4437                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
4438                                 RTE_ETHER_ADDR_LEN);
4439                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
4440                                  RTE_ETHER_ADDR_LEN);
4441
4442                 mac_filter.filter_type = filter->filter_type;
4443                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
4444                 if (ret != I40E_SUCCESS) {
4445                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
4446                         return -1;
4447                 }
4448                 rte_ether_addr_copy(new_mac, &pf->dev_addr);
4449         } else {
4450                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
4451                                 RTE_ETHER_ADDR_LEN);
4452                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
4453                 if (ret != I40E_SUCCESS) {
4454                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
4455                         return -1;
4456                 }
4457
4458                 /* Clear device address as it has been removed */
4459                 if (rte_is_same_ether_addr(&pf->dev_addr, new_mac))
4460                         memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr));
4461         }
4462
4463         return 0;
4464 }
4465
4466 /* MAC filter handle */
4467 static int
4468 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4469                 void *arg)
4470 {
4471         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4472         struct rte_eth_mac_filter *filter;
4473         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4474         int ret = I40E_NOT_SUPPORTED;
4475
4476         filter = (struct rte_eth_mac_filter *)(arg);
4477
4478         switch (filter_op) {
4479         case RTE_ETH_FILTER_NOP:
4480                 ret = I40E_SUCCESS;
4481                 break;
4482         case RTE_ETH_FILTER_ADD:
4483                 i40e_pf_disable_irq0(hw);
4484                 if (filter->is_vf)
4485                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
4486                 i40e_pf_enable_irq0(hw);
4487                 break;
4488         case RTE_ETH_FILTER_DELETE:
4489                 i40e_pf_disable_irq0(hw);
4490                 if (filter->is_vf)
4491                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
4492                 i40e_pf_enable_irq0(hw);
4493                 break;
4494         default:
4495                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4496                 ret = I40E_ERR_PARAM;
4497                 break;
4498         }
4499
4500         return ret;
4501 }
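
/*
 * Note (editorial): only VF-directed filters (filter->is_vf) are acted
 * on above; for PF-directed add/delete requests, ret keeps its initial
 * I40E_NOT_SUPPORTED value and that is what the caller sees.
 */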
4502
4503 static int
4504 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4505 {
4506         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4507         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4508         uint32_t reg;
4509         int ret;
4510
4511         if (!lut)
4512                 return -EINVAL;
4513
4514         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4515                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4516                                           vsi->type != I40E_VSI_SRIOV,
4517                                           lut, lut_size);
4518                 if (ret) {
4519                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4520                         return ret;
4521                 }
4522         } else {
4523                 uint32_t *lut_dw = (uint32_t *)lut;
4524                 uint16_t i, lut_size_dw = lut_size / 4;
4525
4526                 if (vsi->type == I40E_VSI_SRIOV) {
4527                         for (i = 0; i < lut_size_dw; i++) {
4528                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4529                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4530                         }
4531                 } else {
4532                         for (i = 0; i < lut_size_dw; i++)
4533                                 lut_dw[i] = I40E_READ_REG(hw,
4534                                                           I40E_PFQF_HLUT(i));
4535                 }
4536         }
4537
4538         return 0;
4539 }
4540
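/*
 * Note (editorial): the LUT is a byte array of queue indexes packed four
 * entries per 32-bit HLUT register, so the register-based get path above
 * and the set path below both walk lut_size / 4 dwords.
 */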
4541 int
4542 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4543 {
4544         struct i40e_pf *pf;
4545         struct i40e_hw *hw;
4546         int ret;
4547
4548         if (!vsi || !lut)
4549                 return -EINVAL;
4550
4551         pf = I40E_VSI_TO_PF(vsi);
4552         hw = I40E_VSI_TO_HW(vsi);
4553
4554         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4555                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4556                                           vsi->type != I40E_VSI_SRIOV,
4557                                           lut, lut_size);
4558                 if (ret) {
4559                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4560                         return ret;
4561                 }
4562         } else {
4563                 uint32_t *lut_dw = (uint32_t *)lut;
4564                 uint16_t i, lut_size_dw = lut_size / 4;
4565
4566                 if (vsi->type == I40E_VSI_SRIOV) {
4567                         for (i = 0; i < lut_size_dw; i++)
4568                                 I40E_WRITE_REG(
4569                                         hw,
4570                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4571                                         lut_dw[i]);
4572                 } else {
4573                         for (i = 0; i < lut_size_dw; i++)
4574                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4575                                                lut_dw[i]);
4576                 }
4577                 I40E_WRITE_FLUSH(hw);
4578         }
4579
4580         return 0;
4581 }
4582
4583 static int
4584 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4585                          struct rte_eth_rss_reta_entry64 *reta_conf,
4586                          uint16_t reta_size)
4587 {
4588         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4589         uint16_t i, lut_size = pf->hash_lut_size;
4590         uint16_t idx, shift;
4591         uint8_t *lut;
4592         int ret;
4593
4594         if (reta_size != lut_size ||
4595                 reta_size > ETH_RSS_RETA_SIZE_512) {
4596                 PMD_DRV_LOG(ERR,
4597                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
4598                         reta_size, lut_size);
4599                 return -EINVAL;
4600         }
4601
4602         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4603         if (!lut) {
4604                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4605                 return -ENOMEM;
4606         }
4607         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4608         if (ret)
4609                 goto out;
4610         for (i = 0; i < reta_size; i++) {
4611                 idx = i / RTE_RETA_GROUP_SIZE;
4612                 shift = i % RTE_RETA_GROUP_SIZE;
4613                 if (reta_conf[idx].mask & (1ULL << shift))
4614                         lut[i] = reta_conf[idx].reta[shift];
4615         }
4616         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4617
4618         pf->adapter->rss_reta_updated = 1;
4619
4620 out:
4621         rte_free(lut);
4622
4623         return ret;
4624 }
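
/*
 * Usage sketch (editorial, illustrative only): to spread a 512-entry
 * RETA evenly across 4 queues, an application could fill reta_conf as
 *
 *     struct rte_eth_rss_reta_entry64 conf[512 / RTE_RETA_GROUP_SIZE];
 *     memset(conf, 0, sizeof(conf));
 *     for (i = 0; i < 512; i++) {
 *             conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % 4;
 *     }
 *
 * before calling rte_eth_dev_rss_reta_update(port_id, conf, 512).
 */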
4625
4626 static int
4627 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4628                         struct rte_eth_rss_reta_entry64 *reta_conf,
4629                         uint16_t reta_size)
4630 {
4631         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4632         uint16_t i, lut_size = pf->hash_lut_size;
4633         uint16_t idx, shift;
4634         uint8_t *lut;
4635         int ret;
4636
4637         if (reta_size != lut_size ||
4638                 reta_size > ETH_RSS_RETA_SIZE_512) {
4639                 PMD_DRV_LOG(ERR,
4640                         "The size of the configured hash lookup table (%d) doesn't match the number the hardware can support (%d)",
4641                         reta_size, lut_size);
4642                 return -EINVAL;
4643         }
4644
4645         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4646         if (!lut) {
4647                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4648                 return -ENOMEM;
4649         }
4650
4651         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4652         if (ret)
4653                 goto out;
4654         for (i = 0; i < reta_size; i++) {
4655                 idx = i / RTE_RETA_GROUP_SIZE;
4656                 shift = i % RTE_RETA_GROUP_SIZE;
4657                 if (reta_conf[idx].mask & (1ULL << shift))
4658                         reta_conf[idx].reta[shift] = lut[i];
4659         }
4660
4661 out:
4662         rte_free(lut);
4663
4664         return ret;
4665 }
4666
4667 /**
4668  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4669  * @hw:   pointer to the HW structure
4670  * @mem:  pointer to mem struct to fill out
4671  * @size: size of memory requested
4672  * @alignment: what to align the allocation to
4673  **/
4674 enum i40e_status_code
4675 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4676                         struct i40e_dma_mem *mem,
4677                         u64 size,
4678                         u32 alignment)
4679 {
4680         const struct rte_memzone *mz = NULL;
4681         char z_name[RTE_MEMZONE_NAMESIZE];
4682
4683         if (!mem)
4684                 return I40E_ERR_PARAM;
4685
4686         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4687         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4688                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4689         if (!mz)
4690                 return I40E_ERR_NO_MEMORY;
4691
4692         mem->size = size;
4693         mem->va = mz->addr;
4694         mem->pa = mz->iova;
4695         mem->zone = (const void *)mz;
4696         PMD_DRV_LOG(DEBUG,
4697                 "memzone %s allocated with physical address: %"PRIu64,
4698                 mz->name, mem->pa);
4699
4700         return I40E_SUCCESS;
4701 }
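
/*
 * Note (editorial): the zone is reserved IOVA-contiguous and bounded to
 * a 2M page so that mem->pa can be programmed into the device as one
 * DMA address; the memzone pointer is stashed in mem->zone so
 * i40e_free_dma_mem_d() below can release it.
 */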
4702
4703 /**
4704  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4705  * @hw:   pointer to the HW structure
4706  * @mem:  ptr to mem struct to free
4707  **/
4708 enum i40e_status_code
4709 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4710                     struct i40e_dma_mem *mem)
4711 {
4712         if (!mem)
4713                 return I40E_ERR_PARAM;
4714
4715         PMD_DRV_LOG(DEBUG,
4716                 "memzone %s to be freed with physical address: %"PRIu64,
4717                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4718         rte_memzone_free((const struct rte_memzone *)mem->zone);
4719         mem->zone = NULL;
4720         mem->va = NULL;
4721         mem->pa = (u64)0;
4722
4723         return I40E_SUCCESS;
4724 }
4725
4726 /**
4727  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4728  * @hw:   pointer to the HW structure
4729  * @mem:  pointer to mem struct to fill out
4730  * @size: size of memory requested
4731  **/
4732 enum i40e_status_code
4733 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4734                          struct i40e_virt_mem *mem,
4735                          u32 size)
4736 {
4737         if (!mem)
4738                 return I40E_ERR_PARAM;
4739
4740         mem->size = size;
4741         mem->va = rte_zmalloc("i40e", size, 0);
4742
4743         if (mem->va)
4744                 return I40E_SUCCESS;
4745         else
4746                 return I40E_ERR_NO_MEMORY;
4747 }
4748
4749 /**
4750  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4751  * @hw:   pointer to the HW structure
4752  * @mem:  pointer to mem struct to free
4753  **/
4754 enum i40e_status_code
4755 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4756                      struct i40e_virt_mem *mem)
4757 {
4758         if (!mem)
4759                 return I40E_ERR_PARAM;
4760
4761         rte_free(mem->va);
4762         mem->va = NULL;
4763
4764         return I40E_SUCCESS;
4765 }
4766
4767 void
4768 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4769 {
4770         rte_spinlock_init(&sp->spinlock);
4771 }
4772
4773 void
4774 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4775 {
4776         rte_spinlock_lock(&sp->spinlock);
4777 }
4778
4779 void
4780 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4781 {
4782         rte_spinlock_unlock(&sp->spinlock);
4783 }
4784
4785 void
4786 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4787 {
4788         return;
4789 }
4790
4791 /**
4792  * Get the hardware capabilities, which will be parsed
4793  * and saved into struct i40e_hw.
4794  */
4795 static int
4796 i40e_get_cap(struct i40e_hw *hw)
4797 {
4798         struct i40e_aqc_list_capabilities_element_resp *buf;
4799         uint16_t len, size = 0;
4800         int ret;
4801
4802         /* Allocate a buffer large enough to temporarily hold the response data */
4803         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4804                                                 I40E_MAX_CAP_ELE_NUM;
4805         buf = rte_zmalloc("i40e", len, 0);
4806         if (!buf) {
4807                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4808                 return I40E_ERR_NO_MEMORY;
4809         }
4810
4811         /* Get, parse the capabilities and save it to hw */
4812         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4813                         i40e_aqc_opc_list_func_capabilities, NULL);
4814         if (ret != I40E_SUCCESS)
4815                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4816
4817         /* Free the temporary buffer after being used */
4818         rte_free(buf);
4819
4820         return ret;
4821 }
4822
4823 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4824
4825 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4826                 const char *value,
4827                 void *opaque)
4828 {
4829         struct i40e_pf *pf;
4830         unsigned long num;
4831         char *end;
4832
4833         pf = (struct i40e_pf *)opaque;
4834         RTE_SET_USED(key);
4835
4836         errno = 0;
4837         num = strtoul(value, &end, 0);
4838         if (errno != 0 || end == value || *end != 0) {
4839                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4840                             "the current value = %hu", value, pf->vf_nb_qp_max);
4841                 return -(EINVAL);
4842         }
4843
4844         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4845                 pf->vf_nb_qp_max = (uint16_t)num;
4846         else
4847                 /* Return 0 so a later valid instance of the same argument still works */
4848                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be "
4849                             "a power of 2 and no greater than 16; keeping "
4850                             "the current value = %hu", num, pf->vf_nb_qp_max);
4851
4852         return 0;
4853 }
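
/*
 * Note (editorial): given the check above, the accepted per-VF queue
 * numbers are the powers of two from 1 up to I40E_MAX_QP_NUM_PER_VF
 * (i.e. 1, 2, 4, 8 or 16); any other value only logs a warning and
 * keeps the previous setting.
 */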
4854
4855 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4856 {
4857         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4858         struct rte_kvargs *kvlist;
4859         int kvargs_count;
4860
4861         /* set default queue number per VF as 4 */
4862         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4863
4864         if (dev->device->devargs == NULL)
4865                 return 0;
4866
4867         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4868         if (kvlist == NULL)
4869                 return -(EINVAL);
4870
4871         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4872         if (!kvargs_count) {
4873                 rte_kvargs_free(kvlist);
4874                 return 0;
4875         }
4876
4877         if (kvargs_count > 1)
4878                 PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
4879                             "the first invalid or the last valid one is used",
4880                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4881
4882         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4883                            i40e_pf_parse_vf_queue_number_handler, pf);
4884
4885         rte_kvargs_free(kvlist);
4886
4887         return 0;
4888 }
4889
4890 static int
4891 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4892 {
4893         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4894         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4895         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4896         uint16_t qp_count = 0, vsi_count = 0;
4897
4898         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4899                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4900                 return -EINVAL;
4901         }
4902
4903         i40e_pf_config_vf_rxq_number(dev);
4904
4905         /* Add the parameter init for LFC */
4906         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4907         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4908         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4909
4910         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4911         pf->max_num_vsi = hw->func_caps.num_vsis;
4912         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4913         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4914
4915         /* FDir queue/VSI allocation */
4916         pf->fdir_qp_offset = 0;
4917         if (hw->func_caps.fd) {
4918                 pf->flags |= I40E_FLAG_FDIR;
4919                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4920         } else {
4921                 pf->fdir_nb_qps = 0;
4922         }
4923         qp_count += pf->fdir_nb_qps;
4924         vsi_count += 1;
4925
4926         /* LAN queue/VSI allocation */
4927         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4928         if (!hw->func_caps.rss) {
4929                 pf->lan_nb_qps = 1;
4930         } else {
4931                 pf->flags |= I40E_FLAG_RSS;
4932                 if (hw->mac.type == I40E_MAC_X722)
4933                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4934                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4935         }
4936         qp_count += pf->lan_nb_qps;
4937         vsi_count += 1;
4938
4939         /* VF queue/VSI allocation */
4940         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4941         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4942                 pf->flags |= I40E_FLAG_SRIOV;
4943                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4944                 pf->vf_num = pci_dev->max_vfs;
4945                 PMD_DRV_LOG(DEBUG,
4946                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4947                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4948         } else {
4949                 pf->vf_nb_qps = 0;
4950                 pf->vf_num = 0;
4951         }
4952         qp_count += pf->vf_nb_qps * pf->vf_num;
4953         vsi_count += pf->vf_num;
4954
4955         /* VMDq queue/VSI allocation */
4956         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4957         pf->vmdq_nb_qps = 0;
4958         pf->max_nb_vmdq_vsi = 0;
4959         if (hw->func_caps.vmdq) {
4960                 if (qp_count < hw->func_caps.num_tx_qp &&
4961                         vsi_count < hw->func_caps.num_vsis) {
4962                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4963                                 qp_count) / pf->vmdq_nb_qp_max;
4964
4965                         /* Limit the maximum number of VMDq vsi to the maximum
4966                          * ethdev can support
4967                          */
4968                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4969                                 hw->func_caps.num_vsis - vsi_count);
4970                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4971                                 ETH_64_POOLS);
4972                         if (pf->max_nb_vmdq_vsi) {
4973                                 pf->flags |= I40E_FLAG_VMDQ;
4974                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4975                                 PMD_DRV_LOG(DEBUG,
4976                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4977                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4978                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4979                         } else {
4980                                 PMD_DRV_LOG(INFO,
4981                                         "Not enough queues left for VMDq");
4982                         }
4983                 } else {
4984                         PMD_DRV_LOG(INFO, "No queues or VSIs left for VMDq");
4985                 }
4986         }
4987         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4988         vsi_count += pf->max_nb_vmdq_vsi;
4989
4990         if (hw->func_caps.dcb)
4991                 pf->flags |= I40E_FLAG_DCB;
4992
4993         if (qp_count > hw->func_caps.num_tx_qp) {
4994                 PMD_DRV_LOG(ERR,
4995                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4996                         qp_count, hw->func_caps.num_tx_qp);
4997                 return -EINVAL;
4998         }
4999         if (vsi_count > hw->func_caps.num_vsis) {
5000                 PMD_DRV_LOG(ERR,
5001                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
5002                         vsi_count, hw->func_caps.num_vsis);
5003                 return -EINVAL;
5004         }
5005
5006         return 0;
5007 }
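
/*
 * Summary (editorial): the budgeting above hands out queues and VSIs in
 * a fixed order: FDir first, then the LAN VSI, then SR-IOV VFs, and
 * finally VMDq pools take whatever queues remain; the two checks at the
 * end catch over-allocation by the earlier stages.
 */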
5008
5009 static int
5010 i40e_pf_get_switch_config(struct i40e_pf *pf)
5011 {
5012         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5013         struct i40e_aqc_get_switch_config_resp *switch_config;
5014         struct i40e_aqc_switch_config_element_resp *element;
5015         uint16_t start_seid = 0, num_reported;
5016         int ret;
5017
5018         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
5019                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
5020         if (!switch_config) {
5021                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5022                 return -ENOMEM;
5023         }
5024
5025         /* Get the switch configurations */
5026         ret = i40e_aq_get_switch_config(hw, switch_config,
5027                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
5028         if (ret != I40E_SUCCESS) {
5029                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
5030                 goto fail;
5031         }
5032         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
5033         if (num_reported != 1) { /* The number should be 1 */
5034                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
                ret = -EINVAL;
5035                 goto fail;
5036         }
5037
5038         /* Parse the switch configuration elements */
5039         element = &(switch_config->element[0]);
5040         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
5041                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
5042                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
5043         } else
5044                 PMD_DRV_LOG(INFO, "Unknown element type");
5045
5046 fail:
5047         rte_free(switch_config);
5048
5049         return ret;
5050 }
5051
5052 static int
5053 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
5054                         uint32_t num)
5055 {
5056         struct pool_entry *entry;
5057
5058         if (pool == NULL || num == 0)
5059                 return -EINVAL;
5060
5061         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
5062         if (entry == NULL) {
5063                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
5064                 return -ENOMEM;
5065         }
5066
5067         /* Initialize the queue heap */
5068         pool->num_free = num;
5069         pool->num_alloc = 0;
5070         pool->base = base;
5071         LIST_INIT(&pool->alloc_list);
5072         LIST_INIT(&pool->free_list);
5073
5074         /* Initialize the element */
5075         entry->base = 0;
5076         entry->len = num;
5077
5078         LIST_INSERT_HEAD(&pool->free_list, entry, next);
5079         return 0;
5080 }
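
/*
 * Note (editorial): entries on the free/alloc lists store offsets
 * relative to pool->base, so a pool initialized with base = 128 and
 * num = 64 starts with a single free entry {base = 0, len = 64}
 * covering the absolute range [128, 192).
 */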
5081
5082 static void
5083 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
5084 {
5085         struct pool_entry *entry, *next_entry;
5086
5087         if (pool == NULL)
5088                 return;
5089
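        /* Safe-iteration idiom: latch the next pointer before the current
         * entry is unlinked and freed.
         */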
5090         for (entry = LIST_FIRST(&pool->alloc_list);
5091                         entry && (next_entry = LIST_NEXT(entry, next), 1);
5092                         entry = next_entry) {
5093                 LIST_REMOVE(entry, next);
5094                 rte_free(entry);
5095         }
5096
5097         for (entry = LIST_FIRST(&pool->free_list);
5098                         entry && (next_entry = LIST_NEXT(entry, next), 1);
5099                         entry = next_entry) {
5100                 LIST_REMOVE(entry, next);
5101                 rte_free(entry);
5102         }
5103
5104         pool->num_free = 0;
5105         pool->num_alloc = 0;
5106         pool->base = 0;
5107         LIST_INIT(&pool->alloc_list);
5108         LIST_INIT(&pool->free_list);
5109 }
5110
5111 static int
5112 i40e_res_pool_free(struct i40e_res_pool_info *pool,
5113                        uint32_t base)
5114 {
5115         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
5116         uint32_t pool_offset;
5117         uint16_t len;
5118         int insert;
5119
5120         if (pool == NULL) {
5121                 PMD_DRV_LOG(ERR, "Invalid parameter");
5122                 return -EINVAL;
5123         }
5124
5125         pool_offset = base - pool->base;
5126         /* Lookup in alloc list */
5127         LIST_FOREACH(entry, &pool->alloc_list, next) {
5128                 if (entry->base == pool_offset) {
5129                         valid_entry = entry;
5130                         LIST_REMOVE(entry, next);
5131                         break;
5132                 }
5133         }
5134
5135         /* Not found, return */
5136         if (valid_entry == NULL) {
5137                 PMD_DRV_LOG(ERR, "Failed to find entry");
5138                 return -EINVAL;
5139         }
5140
5141         /**
5142          * Found it; move it to the free list and try to merge.
5143          * To make merging easier, the free list is always kept sorted by base.
5144          * Find the adjacent prev and next entries.
5145          */
5146         prev = next = NULL;
5147         LIST_FOREACH(entry, &pool->free_list, next) {
5148                 if (entry->base > valid_entry->base) {
5149                         next = entry;
5150                         break;
5151                 }
5152                 prev = entry;
5153         }
5154
5155         insert = 0;
5156         len = valid_entry->len;
5157         /* Try to merge with the next entry */
5158         if (next != NULL) {
5159                 /* The freed block is adjacent to the next entry, merge into it */
5160                 if (valid_entry->base + len == next->base) {
5161                         next->base = valid_entry->base;
5162                         next->len += len;
5163                         rte_free(valid_entry);
5164                         valid_entry = next;
5165                         insert = 1;
5166                 }
5167         }
5168
5169         if (prev != NULL) {
5170                 /* Merge with previous one */
5171                 if (prev->base + prev->len == valid_entry->base) {
5172                         prev->len += len;
5173                         /* Already merged with next, remove that node too */
5174                         if (insert == 1) {
5175                                 LIST_REMOVE(valid_entry, next);
5176                                 rte_free(valid_entry);
5177                                 valid_entry = NULL;
5178                         } else {
5179                                 rte_free(valid_entry);
5180                                 valid_entry = NULL;
5181                                 insert = 1;
5182                         }
5183                 }
5184         }
5185
5186         /* No adjacent entry to merge with, insert */
5187         if (insert == 0) {
5188                 if (prev != NULL)
5189                         LIST_INSERT_AFTER(prev, valid_entry, next);
5190                 else if (next != NULL)
5191                         LIST_INSERT_BEFORE(next, valid_entry, next);
5192                 else /* It's empty list, insert to head */
5193                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5194         }
5195
5196         pool->num_free += len;
5197         pool->num_alloc -= len;
5198
5199         return 0;
5200 }
5201
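/*
 * i40e_res_pool_alloc - Best-fit allocation of 'num' contiguous
 * resources. On success the absolute base (pool->base + offset) is
 * returned, so callers must treat any negative value as an error.
 * A usage sketch, mirroring what i40e_vsi_setup() and
 * i40e_vsi_release() below do with the PF queue-pair pool:
 *
 *     int base = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
 *     if (base < 0)
 *             return base;          // -EINVAL or -ENOMEM
 *     vsi->base_queue = base;
 *     ...
 *     i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
 */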
5202 static int
5203 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5204                        uint16_t num)
5205 {
5206         struct pool_entry *entry, *valid_entry;
5207
5208         if (pool == NULL || num == 0) {
5209                 PMD_DRV_LOG(ERR, "Invalid parameter");
5210                 return -EINVAL;
5211         }
5212
5213         if (pool->num_free < num) {
5214                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5215                             num, pool->num_free);
5216                 return -ENOMEM;
5217         }
5218
5219         valid_entry = NULL;
5220         /* Look up the free list and find the best-fit entry */
5221         LIST_FOREACH(entry, &pool->free_list, next) {
5222                 if (entry->len >= num) {
5223                         /* Exact fit, stop searching */
5224                         if (entry->len == num) {
5225                                 valid_entry = entry;
5226                                 break;
5227                         }
5228                         if (valid_entry == NULL || valid_entry->len > entry->len)
5229                                 valid_entry = entry;
5230                 }
5231         }
5232
5233         /* No entry satisfies the request, return */
5234         if (valid_entry == NULL) {
5235                 PMD_DRV_LOG(ERR, "No valid entry found");
5236                 return -ENOMEM;
5237         }
5238         /**
5239          * The entry has exactly the number of queues requested;
5240          * remove it from the free list.
5241          */
5242         if (valid_entry->len == num) {
5243                 LIST_REMOVE(valid_entry, next);
5244         } else {
5245                 /**
5246                  * The entry has more queues than requested;
5247                  * create a new entry for the alloc list and shrink
5248                  * the base/length of the entry left in the free list.
5249                  */
5250                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5251                 if (entry == NULL) {
5252                         PMD_DRV_LOG(ERR,
5253                                 "Failed to allocate memory for resource pool");
5254                         return -ENOMEM;
5255                 }
5256                 entry->base = valid_entry->base;
5257                 entry->len = num;
5258                 valid_entry->base += num;
5259                 valid_entry->len -= num;
5260                 valid_entry = entry;
5261         }
5262
5263         /* Insert it into alloc list, not sorted */
5264         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5265
5266         pool->num_free -= valid_entry->len;
5267         pool->num_alloc += valid_entry->len;
5268
5269         return valid_entry->base + pool->base;
5270 }
5271
5272 /**
5273  * bitmap_is_subset - Check whether src2 is a subset of src1
5274  **/
5275 static inline int
5276 bitmap_is_subset(uint8_t src1, uint8_t src2)
5277 {
5278         return !((src1 ^ src2) & src2);
5279 }
5280
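/*
 * validate_tcmap_parameter - Sanity-check a requested TC bitmap:
 * without DCB only the default TC (TC0) may be enabled, and the
 * request must be a subset of the TCs the hardware has enabled.
 */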
5281 static enum i40e_status_code
5282 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5283 {
5284         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5285
5286         /* If DCB is not supported, only default TC is supported */
5287         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5288                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5289                 return I40E_NOT_SUPPORTED;
5290         }
5291
5292         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5293                 PMD_DRV_LOG(ERR,
5294                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
5295                         enabled_tcmap, hw->func_caps.enabled_tcmap);
5296                 return I40E_NOT_SUPPORTED;
5297         }
5298         return I40E_SUCCESS;
5299 }
5300
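/*
 * i40e_vsi_vlan_pvid_set - Program the port VLAN behavior of a VSI.
 * When 'on', the given PVID is inserted into transmitted packets;
 * otherwise the accept-tagged/untagged mode bits are derived from the
 * reject flags. The result is pushed to firmware through an
 * update-VSI-parameters admin queue command.
 */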
5301 int
5302 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5303                                 struct i40e_vsi_vlan_pvid_info *info)
5304 {
5305         struct i40e_hw *hw;
5306         struct i40e_vsi_context ctxt;
5307         uint8_t vlan_flags = 0;
5308         int ret;
5309
5310         if (vsi == NULL || info == NULL) {
5311                 PMD_DRV_LOG(ERR, "invalid parameters");
5312                 return I40E_ERR_PARAM;
5313         }
5314
5315         if (info->on) {
5316                 vsi->info.pvid = info->config.pvid;
5317                 /**
5318                  * When PVID insertion is enabled, only tagged packets
5319                  * are allowed to be sent out.
5320                  */
5321                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5322                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5323         } else {
5324                 vsi->info.pvid = 0;
5325                 if (info->config.reject.tagged == 0)
5326                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5327
5328                 if (info->config.reject.untagged == 0)
5329                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5330         }
5331         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5332                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5333         vsi->info.port_vlan_flags |= vlan_flags;
5334         vsi->info.valid_sections =
5335                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5336         memset(&ctxt, 0, sizeof(ctxt));
5337         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5338         ctxt.seid = vsi->seid;
5339
5340         hw = I40E_VSI_TO_HW(vsi);
5341         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5342         if (ret != I40E_SUCCESS)
5343                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5344
5345         return ret;
5346 }
5347
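/*
 * i40e_vsi_update_tc_bandwidth - Give each enabled TC one relative
 * bandwidth credit (an equal share) and store the returned queue-set
 * handles in the VSI info.
 */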
5348 static int
5349 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5350 {
5351         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5352         int i, ret;
5353         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5354
5355         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5356         if (ret != I40E_SUCCESS)
5357                 return ret;
5358
5359         if (!vsi->seid) {
5360                 PMD_DRV_LOG(ERR, "seid not valid");
5361                 return -EINVAL;
5362         }
5363
5364         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5365         tc_bw_data.tc_valid_bits = enabled_tcmap;
5366         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5367                 tc_bw_data.tc_bw_credits[i] =
5368                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5369
5370         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5371         if (ret != I40E_SUCCESS) {
5372                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5373                 return ret;
5374         }
5375
5376         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5377                                         sizeof(vsi->info.qs_handle));
5378         return I40E_SUCCESS;
5379 }
5380
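/*
 * i40e_vsi_config_tc_queue_mapping - Distribute the VSI's queue pairs
 * evenly across the enabled TCs and fill in the tc_mapping and
 * queue_mapping sections of the VSI properties: contiguous mapping in
 * general, an explicit per-queue mapping for SRIOV VSIs.
 */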
5381 static enum i40e_status_code
5382 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5383                                  struct i40e_aqc_vsi_properties_data *info,
5384                                  uint8_t enabled_tcmap)
5385 {
5386         enum i40e_status_code ret;
5387         int i, total_tc = 0;
5388         uint16_t qpnum_per_tc, bsf, qp_idx;
5389
5390         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5391         if (ret != I40E_SUCCESS)
5392                 return ret;
5393
5394         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5395                 if (enabled_tcmap & (1 << i))
5396                         total_tc++;
5397         if (total_tc == 0)
5398                 total_tc = 1;
5399         vsi->enabled_tc = enabled_tcmap;
5400
5401         /* Number of queues per enabled TC */
5402         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5403         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5404         bsf = rte_bsf32(qpnum_per_tc);
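        /*
         * The AQ tc_mapping field encodes the per-TC queue count as a
         * power-of-two exponent; qpnum_per_tc was floored to a power
         * of two above, and bsf holds that exponent.
         */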
5405
5406         /* Adjust the queue number to actual queues that can be applied */
5407         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5408                 vsi->nb_qps = qpnum_per_tc * total_tc;
5409
5410         /**
5411          * Configure TC and queue mapping parameters: for each enabled TC,
5412          * allocate qpnum_per_tc queues to its traffic; disabled TCs are
5413          * served by the default queue.
5414          */
5415         qp_idx = 0;
5416         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5417                 if (vsi->enabled_tc & (1 << i)) {
5418                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5419                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5420                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5421                         qp_idx += qpnum_per_tc;
5422                 } else
5423                         info->tc_mapping[i] = 0;
5424         }
5425
5426         /* Associate queue number with VSI */
5427         if (vsi->type == I40E_VSI_SRIOV) {
5428                 info->mapping_flags |=
5429                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5430                 for (i = 0; i < vsi->nb_qps; i++)
5431                         info->queue_mapping[i] =
5432                                 rte_cpu_to_le_16(vsi->base_queue + i);
5433         } else {
5434                 info->mapping_flags |=
5435                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5436                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5437         }
5438         info->valid_sections |=
5439                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5440
5441         return I40E_SUCCESS;
5442 }
5443
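/*
 * i40e_veb_release - Delete a VEB that no longer has any VSI attached:
 * detach it from its associated VSI (or from the PF's floating-VEB
 * slot) and remove the switch element through the admin queue.
 */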
5444 static int
5445 i40e_veb_release(struct i40e_veb *veb)
5446 {
5447         struct i40e_vsi *vsi;
5448         struct i40e_hw *hw;
5449
5450         if (veb == NULL)
5451                 return -EINVAL;
5452
5453         if (!TAILQ_EMPTY(&veb->head)) {
5454                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5455                 return -EACCES;
5456         }
5457         /* associate_vsi field is NULL for floating VEB */
5458         if (veb->associate_vsi != NULL) {
5459                 vsi = veb->associate_vsi;
5460                 hw = I40E_VSI_TO_HW(vsi);
5461
5462                 vsi->uplink_seid = veb->uplink_seid;
5463                 vsi->veb = NULL;
5464         } else {
5465                 veb->associate_pf->main_vsi->floating_veb = NULL;
5466                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5467         }
5468
5469         i40e_aq_delete_element(hw, veb->seid, NULL);
5470         rte_free(veb);
5471         return I40E_SUCCESS;
5472 }
5473
5474 /* Setup a veb */
5475 static struct i40e_veb *
5476 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5477 {
5478         struct i40e_veb *veb;
5479         int ret;
5480         struct i40e_hw *hw;
5481
5482         if (pf == NULL) {
5483                 PMD_DRV_LOG(ERR,
5484                             "veb setup failed, associated PF shouldn't be NULL");
5485                 return NULL;
5486         }
5487         hw = I40E_PF_TO_HW(pf);
5488
5489         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5490         if (!veb) {
5491                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5492                 goto fail;
5493         }
5494
5495         veb->associate_vsi = vsi;
5496         veb->associate_pf = pf;
5497         TAILQ_INIT(&veb->head);
5498         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5499
5500         /* create floating veb if vsi is NULL */
5501         if (vsi != NULL) {
5502                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5503                                       I40E_DEFAULT_TCMAP, false,
5504                                       &veb->seid, false, NULL);
5505         } else {
5506                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5507                                       true, &veb->seid, false, NULL);
5508         }
5509
5510         if (ret != I40E_SUCCESS) {
5511                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5512                             hw->aq.asq_last_status);
5513                 goto fail;
5514         }
5515         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5516
5517         /* get statistics index */
5518         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5519                                 &veb->stats_idx, NULL, NULL, NULL);
5520         if (ret != I40E_SUCCESS) {
5521                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5522                             hw->aq.asq_last_status);
5523                 goto fail;
5524         }
5525         /* Get VEB bandwidth, to be implemented */
5526         /* Now associated vsi binding to the VEB, set uplink to this VEB */
5527         if (vsi)
5528                 vsi->uplink_seid = veb->seid;
5529
5530         return veb;
5531 fail:
5532         rte_free(veb);
5533         return NULL;
5534 }
5535
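/*
 * i40e_vsi_release - Tear down a VSI: recursively release any child
 * VSIs hanging off its VEB or floating VEB, drop all MAC/VLAN filters,
 * unlink it from its parent's sibling list, delete the switch element,
 * and return its queue pairs (and, for non-SRIOV VSIs, its MSI-X
 * vectors) to the PF resource pools.
 */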
5536 int
5537 i40e_vsi_release(struct i40e_vsi *vsi)
5538 {
5539         struct i40e_pf *pf;
5540         struct i40e_hw *hw;
5541         struct i40e_vsi_list *vsi_list;
5542         void *temp;
5543         int ret;
5544         struct i40e_mac_filter *f;
5545         uint16_t user_param;
5546
5547         if (!vsi)
5548                 return I40E_SUCCESS;
5549
5550         if (!vsi->adapter)
5551                 return -EFAULT;
5552
5553         user_param = vsi->user_param;
5554
5555         pf = I40E_VSI_TO_PF(vsi);
5556         hw = I40E_VSI_TO_HW(vsi);
5557
5558         /* VSI has children attached, release the children first */
5559         if (vsi->veb) {
5560                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5561                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5562                                 return -1;
5563                 }
5564                 i40e_veb_release(vsi->veb);
5565         }
5566
5567         if (vsi->floating_veb) {
5568                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5569                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5570                                 return -1;
5571                 }
5572         }
5573
5574         /* Remove all macvlan filters of the VSI */
5575         i40e_vsi_remove_all_macvlan_filter(vsi);
5576         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5577                 rte_free(f);
5578
5579         if (vsi->type != I40E_VSI_MAIN &&
5580             ((vsi->type != I40E_VSI_SRIOV) ||
5581             !pf->floating_veb_list[user_param])) {
5582                 /* Remove vsi from parent's sibling list */
5583                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5584                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
5585                         return I40E_ERR_PARAM;
5586                 }
5587                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5588                                 &vsi->sib_vsi_list, list);
5589
5590                 /* Remove all switch element of the VSI */
5591                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5592                 if (ret != I40E_SUCCESS)
5593                         PMD_DRV_LOG(ERR, "Failed to delete element");
5594         }
5595
5596         if ((vsi->type == I40E_VSI_SRIOV) &&
5597             pf->floating_veb_list[user_param]) {
5598                 /* Remove vsi from parent's sibling list */
5599                 if (vsi->parent_vsi == NULL ||
5600                     vsi->parent_vsi->floating_veb == NULL) {
5601                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
5602                         return I40E_ERR_PARAM;
5603                 }
5604                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5605                              &vsi->sib_vsi_list, list);
5606
5607                 /* Remove all switch element of the VSI */
5608                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5609                 if (ret != I40E_SUCCESS)
5610                         PMD_DRV_LOG(ERR, "Failed to delete element");
5611         }
5612
5613         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5614
5615         if (vsi->type != I40E_VSI_SRIOV)
5616                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5617         rte_free(vsi);
5618
5619         return I40E_SUCCESS;
5620 }
5621
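/*
 * i40e_update_default_filter_setting - Try to remove the firmware's
 * default macvlan filter on the main VSI and replace it with a
 * perfect-match filter for the permanent MAC address. If removal
 * fails (newer firmware keeps the default filter), the permanent MAC
 * is simply recorded in the software MAC list.
 */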
5622 static int
5623 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5624 {
5625         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5626         struct i40e_aqc_remove_macvlan_element_data def_filter;
5627         struct i40e_mac_filter_info filter;
5628         int ret;
5629
5630         if (vsi->type != I40E_VSI_MAIN)
5631                 return I40E_ERR_CONFIG;
5632         memset(&def_filter, 0, sizeof(def_filter));
5633         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5634                                         ETH_ADDR_LEN);
5635         def_filter.vlan_tag = 0;
5636         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5637                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5638         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5639         if (ret != I40E_SUCCESS) {
5640                 struct i40e_mac_filter *f;
5641                 struct rte_ether_addr *mac;
5642
5643                 PMD_DRV_LOG(DEBUG,
5644                             "Cannot remove the default macvlan filter");
5645                 /* Need to add the permanent MAC to the MAC list instead */
5646                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5647                 if (f == NULL) {
5648                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5649                         return I40E_ERR_NO_MEMORY;
5650                 }
5651                 mac = &f->mac_info.mac_addr;
5652                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5653                                 ETH_ADDR_LEN);
5654                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5655                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5656                 vsi->mac_num++;
5657
5658                 return ret;
5659         }
5660         rte_memcpy(&filter.mac_addr,
5661                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5662         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5663         return i40e_vsi_add_mac(vsi, &filter);
5664 }
5665
5666 /*
5667  * i40e_vsi_get_bw_config - Query VSI BW Information
5668  * @vsi: the VSI to be queried
5669  *
5670  * Returns 0 on success, negative value on failure
5671  */
5672 static enum i40e_status_code
5673 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5674 {
5675         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5676         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5677         struct i40e_hw *hw = &vsi->adapter->hw;
5678         i40e_status ret;
5679         int i;
5680         uint32_t bw_max;
5681
5682         memset(&bw_config, 0, sizeof(bw_config));
5683         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5684         if (ret != I40E_SUCCESS) {
5685                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5686                             hw->aq.asq_last_status);
5687                 return ret;
5688         }
5689
5690         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5691         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5692                                         &ets_sla_config, NULL);
5693         if (ret != I40E_SUCCESS) {
5694                 PMD_DRV_LOG(ERR,
5695                         "VSI failed to get TC bandwidth configuration %u",
5696                         hw->aq.asq_last_status);
5697                 return ret;
5698         }
5699
5700         /* store and print out BW info */
5701         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5702         vsi->bw_info.bw_max = bw_config.max_bw;
5703         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5704         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5705         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5706                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5707                      I40E_16_BIT_WIDTH);
5708         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5709                 vsi->bw_info.bw_ets_share_credits[i] =
5710                                 ets_sla_config.share_credits[i];
5711                 vsi->bw_info.bw_ets_credits[i] =
5712                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5713                 /* 4 bits per TC, 4th bit is reserved */
5714                 vsi->bw_info.bw_ets_max[i] =
5715                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5716                                   RTE_LEN2MASK(3, uint8_t));
5717                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5718                             vsi->bw_info.bw_ets_share_credits[i]);
5719                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5720                             vsi->bw_info.bw_ets_credits[i]);
5721                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5722                             vsi->bw_info.bw_ets_max[i]);
5723         }
5724
5725         return I40E_SUCCESS;
5726 }
5727
5728 /* i40e_enable_pf_lb
5729  * @pf: pointer to the pf structure
5730  *
5731  * allow loopback on pf
5732  */
5733 static inline void
5734 i40e_enable_pf_lb(struct i40e_pf *pf)
5735 {
5736         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5737         struct i40e_vsi_context ctxt;
5738         int ret;
5739
5740         /* The loopback flag can only be set with FW >= v5.0 (or on X722) */
5741         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5742                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5743                 return;
5744         }
5745
5746         memset(&ctxt, 0, sizeof(ctxt));
5747         ctxt.seid = pf->main_vsi_seid;
5748         ctxt.pf_num = hw->pf_id;
5749         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5750         if (ret) {
5751                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5752                             ret, hw->aq.asq_last_status);
5753                 return;
5754         }
5755         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5756         ctxt.info.valid_sections =
5757                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5758         ctxt.info.switch_id |=
5759                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5760
5761         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5762         if (ret)
5763                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5764                             hw->aq.asq_last_status);
5765 }
5766
5767 /* Setup a VSI */
5768 struct i40e_vsi *
5769 i40e_vsi_setup(struct i40e_pf *pf,
5770                enum i40e_vsi_type type,
5771                struct i40e_vsi *uplink_vsi,
5772                uint16_t user_param)
5773 {
5774         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5775         struct i40e_vsi *vsi;
5776         struct i40e_mac_filter_info filter;
5777         int ret;
5778         struct i40e_vsi_context ctxt;
5779         struct rte_ether_addr broadcast =
5780                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5781
5782         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5783             uplink_vsi == NULL) {
5784                 PMD_DRV_LOG(ERR,
5785                         "VSI setup failed, uplink VSI shouldn't be NULL");
5786                 return NULL;
5787         }
5788
5789         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5790                 PMD_DRV_LOG(ERR,
5791                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5792                 return NULL;
5793         }
5794
5795         /* Two situations:
5796          * 1. type is not MAIN and the uplink VSI is not NULL:
5797          *    if the uplink VSI has no VEB yet, create one under its veb field.
5798          * 2. type is SRIOV and the uplink VSI is NULL:
5799          *    if there is no floating VEB yet, create one under floating_veb.
5800          */
5801
5802         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5803             uplink_vsi->veb == NULL) {
5804                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5805
5806                 if (uplink_vsi->veb == NULL) {
5807                         PMD_DRV_LOG(ERR, "VEB setup failed");
5808                         return NULL;
5809                 }
5810                 /* set ALLOWLOOPBACK on the PF when the VEB is created */
5811                 i40e_enable_pf_lb(pf);
5812         }
5813
5814         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5815             pf->main_vsi->floating_veb == NULL) {
5816                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5817
5818                 if (pf->main_vsi->floating_veb == NULL) {
5819                         PMD_DRV_LOG(ERR, "VEB setup failed");
5820                         return NULL;
5821                 }
5822         }
5823
5824         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5825         if (!vsi) {
5826                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5827                 return NULL;
5828         }
5829         TAILQ_INIT(&vsi->mac_list);
5830         vsi->type = type;
5831         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5832         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5833         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5834         vsi->user_param = user_param;
5835         vsi->vlan_anti_spoof_on = 0;
5836         vsi->vlan_filter_on = 0;
5837         /* Allocate queues */
5838         switch (vsi->type) {
5839         case I40E_VSI_MAIN  :
5840                 vsi->nb_qps = pf->lan_nb_qps;
5841                 break;
5842         case I40E_VSI_SRIOV :
5843                 vsi->nb_qps = pf->vf_nb_qps;
5844                 break;
5845         case I40E_VSI_VMDQ2:
5846                 vsi->nb_qps = pf->vmdq_nb_qps;
5847                 break;
5848         case I40E_VSI_FDIR:
5849                 vsi->nb_qps = pf->fdir_nb_qps;
5850                 break;
5851         default:
5852                 goto fail_mem;
5853         }
5854         /*
5855          * The filter status descriptor is reported on rx queue 0,
5856          * while the tx queue for fdir filter programming has no such
5857          * constraint and can be any queue.
5858          * To simplify things, the FDIR vsi uses queue pair 0.
5859          * To make sure it gets queue pair 0, its queue allocation
5860          * must be done before this function is called.
5861          */
5862         if (type != I40E_VSI_FDIR) {
5863                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5864                 if (ret < 0) {
5865                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5866                                     vsi->seid, ret);
5867                         goto fail_mem;
5868                 }
5869                 vsi->base_queue = ret;
5870         } else
5871                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5872
5873         /* A VF's MSIX interrupts are in the VF range, don't allocate here */
5874         if (type == I40E_VSI_MAIN) {
5875                 if (pf->support_multi_driver) {
5876                         /* With multi-driver support, INT0 must be used instead
5877                          * of allocating from the msix pool. The msix pool is
5878                          * initialized from INT1, so it's safe to simply set
5879                          * msix_intr to 0 and nb_msix to 1 without allocating.
5880                          */
5881                         vsi->msix_intr = 0;
5882                         vsi->nb_msix = 1;
5883                 } else {
5884                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5885                                                   RTE_MIN(vsi->nb_qps,
5886                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5887                         if (ret < 0) {
5888                                 PMD_DRV_LOG(ERR,
5889                                             "VSI MAIN %d get heap failed %d",
5890                                             vsi->seid, ret);
5891                                 goto fail_queue_alloc;
5892                         }
5893                         vsi->msix_intr = ret;
5894                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5895                                                RTE_MAX_RXTX_INTR_VEC_ID);
5896                 }
5897         } else if (type != I40E_VSI_SRIOV) {
5898                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5899                 if (ret < 0) {
5900                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5901                         if (type != I40E_VSI_FDIR)
5902                                 goto fail_queue_alloc;
5903                         vsi->msix_intr = 0;
5904                         vsi->nb_msix = 0;
5905                 } else {
5906                         vsi->msix_intr = ret;
5907                         vsi->nb_msix = 1;
5908                 }
5909         } else {
5910                 vsi->msix_intr = 0;
5911                 vsi->nb_msix = 0;
5912         }
5913
5914         /* Add VSI */
5915         if (type == I40E_VSI_MAIN) {
5916                 /* For the main VSI, no need to add it since it's the default one */
5917                 vsi->uplink_seid = pf->mac_seid;
5918                 vsi->seid = pf->main_vsi_seid;
5919                 /* Bind queues with specific MSIX interrupt */
5920                 /**
5921                  * At least 2 interrupts are needed: one for misc causes,
5922                  * enabled from the OS side, and another for the queues,
5923                  * bound to the interrupt from the device side only.
5924                  */
5925
5926                 /* Get default VSI parameters from hardware */
5927                 memset(&ctxt, 0, sizeof(ctxt));
5928                 ctxt.seid = vsi->seid;
5929                 ctxt.pf_num = hw->pf_id;
5930                 ctxt.uplink_seid = vsi->uplink_seid;
5931                 ctxt.vf_num = 0;
5932                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5933                 if (ret != I40E_SUCCESS) {
5934                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5935                         goto fail_msix_alloc;
5936                 }
5937                 rte_memcpy(&vsi->info, &ctxt.info,
5938                         sizeof(struct i40e_aqc_vsi_properties_data));
5939                 vsi->vsi_id = ctxt.vsi_number;
5940                 vsi->info.valid_sections = 0;
5941
5942                 /* Configure tc, enabled TC0 only */
5943                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5944                         I40E_SUCCESS) {
5945                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5946                         goto fail_msix_alloc;
5947                 }
5948
5949                 /* TC, queue mapping */
5950                 memset(&ctxt, 0, sizeof(ctxt));
5951                 vsi->info.valid_sections |=
5952                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5953                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5954                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5955                 rte_memcpy(&ctxt.info, &vsi->info,
5956                         sizeof(struct i40e_aqc_vsi_properties_data));
5957                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5958                                                 I40E_DEFAULT_TCMAP);
5959                 if (ret != I40E_SUCCESS) {
5960                         PMD_DRV_LOG(ERR,
5961                                 "Failed to configure TC queue mapping");
5962                         goto fail_msix_alloc;
5963                 }
5964                 ctxt.seid = vsi->seid;
5965                 ctxt.pf_num = hw->pf_id;
5966                 ctxt.uplink_seid = vsi->uplink_seid;
5967                 ctxt.vf_num = 0;
5968
5969                 /* Update VSI parameters */
5970                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5971                 if (ret != I40E_SUCCESS) {
5972                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5973                         goto fail_msix_alloc;
5974                 }
5975
5976                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5977                                                 sizeof(vsi->info.tc_mapping));
5978                 rte_memcpy(&vsi->info.queue_mapping,
5979                                 &ctxt.info.queue_mapping,
5980                         sizeof(vsi->info.queue_mapping));
5981                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5982                 vsi->info.valid_sections = 0;
5983
5984                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5985                                 ETH_ADDR_LEN);
5986
5987                 /**
5988                  * Updating the default filter settings is necessary to prevent
5989                  * reception of tagged packets.
5990                  * Some old firmware configurations load a default macvlan
5991                  * filter which accepts both tagged and untagged packets.
5992                  * The update replaces it with a normal filter if needed.
5993                  * For NVM 4.2.2 or later, the update is not needed anymore:
5994                  * firmware with correct configurations loads the default
5995                  * macvlan filter, which is expected and cannot be removed.
5996                  */
5997                 i40e_update_default_filter_setting(vsi);
5998                 i40e_config_qinq(hw, vsi);
5999         } else if (type == I40E_VSI_SRIOV) {
6000                 memset(&ctxt, 0, sizeof(ctxt));
6001                 /**
6002                  * For other VSI types, the uplink_seid equals the uplink
6003                  * VSI's uplink_seid since they share the same VEB.
6004                  */
6005                 if (uplink_vsi == NULL)
6006                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
6007                 else
6008                         vsi->uplink_seid = uplink_vsi->uplink_seid;
6009                 ctxt.pf_num = hw->pf_id;
6010                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
6011                 ctxt.uplink_seid = vsi->uplink_seid;
6012                 ctxt.connection_type = 0x1;
6013                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6014
6015                 /* Use the VEB configuration if FW >= v5.0 */
6016                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
6017                         /* Configure switch ID */
6018                         ctxt.info.valid_sections |=
6019                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6020                         ctxt.info.switch_id =
6021                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6022                 }
6023
6024                 /* Configure port/vlan */
6025                 ctxt.info.valid_sections |=
6026                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6027                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6028                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6029                                                 hw->func_caps.enabled_tcmap);
6030                 if (ret != I40E_SUCCESS) {
6031                         PMD_DRV_LOG(ERR,
6032                                 "Failed to configure TC queue mapping");
6033                         goto fail_msix_alloc;
6034                 }
6035
6036                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
6037                 ctxt.info.valid_sections |=
6038                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6039                 /**
6040                  * Since the VSI is not created yet, only configure the
6041                  * parameters; the VSI is added below.
6042                  */
6043
6044                 i40e_config_qinq(hw, vsi);
6045         } else if (type == I40E_VSI_VMDQ2) {
6046                 memset(&ctxt, 0, sizeof(ctxt));
6047                 /*
6048                  * For other VSI types, the uplink_seid equals the uplink
6049                  * VSI's uplink_seid since they share the same VEB.
6050                  */
6051                 vsi->uplink_seid = uplink_vsi->uplink_seid;
6052                 ctxt.pf_num = hw->pf_id;
6053                 ctxt.vf_num = 0;
6054                 ctxt.uplink_seid = vsi->uplink_seid;
6055                 ctxt.connection_type = 0x1;
6056                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6057
6058                 ctxt.info.valid_sections |=
6059                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6060                 /* user_param carries flag to enable loop back */
6061                 if (user_param) {
6062                         ctxt.info.switch_id =
6063                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6064                         ctxt.info.switch_id |=
6065                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6066                 }
6067
6068                 /* Configure port/vlan */
6069                 ctxt.info.valid_sections |=
6070                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6071                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6072                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6073                                                 I40E_DEFAULT_TCMAP);
6074                 if (ret != I40E_SUCCESS) {
6075                         PMD_DRV_LOG(ERR,
6076                                 "Failed to configure TC queue mapping");
6077                         goto fail_msix_alloc;
6078                 }
6079                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6080                 ctxt.info.valid_sections |=
6081                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6082         } else if (type == I40E_VSI_FDIR) {
6083                 memset(&ctxt, 0, sizeof(ctxt));
6084                 vsi->uplink_seid = uplink_vsi->uplink_seid;
6085                 ctxt.pf_num = hw->pf_id;
6086                 ctxt.vf_num = 0;
6087                 ctxt.uplink_seid = vsi->uplink_seid;
6088                 ctxt.connection_type = 0x1;     /* regular data port */
6089                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6090                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6091                                                 I40E_DEFAULT_TCMAP);
6092                 if (ret != I40E_SUCCESS) {
6093                         PMD_DRV_LOG(ERR,
6094                                 "Failed to configure TC queue mapping.");
6095                         goto fail_msix_alloc;
6096                 }
6097                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6098                 ctxt.info.valid_sections |=
6099                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6100         } else {
6101                 PMD_DRV_LOG(ERR, "VSI type not supported yet");
6102                 goto fail_msix_alloc;
6103         }
6104
6105         if (vsi->type != I40E_VSI_MAIN) {
6106                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6107                 if (ret != I40E_SUCCESS) {
6108                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
6109                                     hw->aq.asq_last_status);
6110                         goto fail_msix_alloc;
6111                 }
6112                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6113                 vsi->info.valid_sections = 0;
6114                 vsi->seid = ctxt.seid;
6115                 vsi->vsi_id = ctxt.vsi_number;
6116                 vsi->sib_vsi_list.vsi = vsi;
6117                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
6118                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6119                                           &vsi->sib_vsi_list, list);
6120                 } else {
6121                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6122                                           &vsi->sib_vsi_list, list);
6123                 }
6124         }
6125
6126         /* MAC/VLAN configuration */
6127         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6128         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
6129
6130         ret = i40e_vsi_add_mac(vsi, &filter);
6131         if (ret != I40E_SUCCESS) {
6132                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6133                 goto fail_msix_alloc;
6134         }
6135
6136         /* Get VSI BW information */
6137         i40e_vsi_get_bw_config(vsi);
6138         return vsi;
6139 fail_msix_alloc:
6140         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
6141 fail_queue_alloc:
6142         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6143 fail_mem:
6144         rte_free(vsi);
6145         return NULL;
6146 }
6147
6148 /* Configure vlan filter on or off */
6149 int
6150 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6151 {
6152         int i, num;
6153         struct i40e_mac_filter *f;
6154         void *temp;
6155         struct i40e_mac_filter_info *mac_filter;
6156         enum rte_mac_filter_type desired_filter;
6157         int ret = I40E_SUCCESS;
6158
6159         if (on) {
6160                 /* Filter to match MAC and VLAN */
6161                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
6162         } else {
6163                 /* Filter to match only MAC */
6164                 desired_filter = RTE_MAC_PERFECT_MATCH;
6165         }
6166
6167         num = vsi->mac_num;
6168
6169         mac_filter = rte_zmalloc("mac_filter_info_data",
6170                                  num * sizeof(*mac_filter), 0);
6171         if (mac_filter == NULL) {
6172                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6173                 return I40E_ERR_NO_MEMORY;
6174         }
6175
6176         i = 0;
6177
6178         /* Remove all existing MAC filters */
6179         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6180                 mac_filter[i] = f->mac_info;
6181                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6182                 if (ret) {
6183                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6184                                     on ? "enable" : "disable");
6185                         goto DONE;
6186                 }
6187                 i++;
6188         }
6189
6190         /* Re-add all MACs with the new filter type */
6191         for (i = 0; i < num; i++) {
6192                 mac_filter[i].filter_type = desired_filter;
6193                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6194                 if (ret) {
6195                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6196                                     on ? "enable" : "disable");
6197                         goto DONE;
6198                 }
6199         }
6200
6201 DONE:
6202         rte_free(mac_filter);
6203         return ret;
6204 }
6205
6206 /* Configure vlan stripping on or off */
6207 int
6208 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6209 {
6210         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6211         struct i40e_vsi_context ctxt;
6212         uint8_t vlan_flags;
6213         int ret = I40E_SUCCESS;
6214
6215         /* Check whether it is already on or off */
6216         if (vsi->info.valid_sections &
6217                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6218                 if (on) {
6219                         if ((vsi->info.port_vlan_flags &
6220                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6221                                 return 0; /* already on */
6222                 } else {
6223                         if ((vsi->info.port_vlan_flags &
6224                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6225                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6226                                 return 0; /* already off */
6227                 }
6228         }
6229
6230         if (on)
6231                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6232         else
6233                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6234         vsi->info.valid_sections =
6235                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6236         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6237         vsi->info.port_vlan_flags |= vlan_flags;
6238         ctxt.seid = vsi->seid;
6239         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6240         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6241         if (ret)
6242                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6243                             on ? "enable" : "disable");
6244
6245         return ret;
6246 }
6247
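/*
 * i40e_dev_init_vlan - Apply the VLAN configuration carried in
 * dev_conf at initialization time: the strip/QinQ-strip/filter/extend
 * offloads first, then the TX PVID setting.
 */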
6248 static int
6249 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6250 {
6251         struct rte_eth_dev_data *data = dev->data;
6252         int ret;
6253         int mask = 0;
6254
6255         /* Apply vlan offload setting */
6256         mask = ETH_VLAN_STRIP_MASK |
6257                ETH_QINQ_STRIP_MASK |
6258                ETH_VLAN_FILTER_MASK |
6259                ETH_VLAN_EXTEND_MASK;
6260         ret = i40e_vlan_offload_set(dev, mask);
6261         if (ret) {
6262                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6263                 return ret;
6264         }
6265
6266         /* Apply pvid setting */
6267         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6268                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6269         if (ret)
6270                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6271
6272         return ret;
6273 }
6274
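/*
 * i40e_vsi_config_double_vlan - Switch double VLAN (QinQ) handling on
 * or off at the port level through the set-port-parameters admin
 * queue command.
 */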
6275 static int
6276 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6277 {
6278         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6279
6280         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6281 }
6282
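/*
 * i40e_update_flow_control - Derive the RX/TX pause configuration from
 * the link auto-negotiation result and mirror it into the PRTDCB_FCCFG
 * and PRTDCB_MFLCN registers. If the link status cannot be read or
 * auto-negotiation has not completed, flow control is disabled.
 */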
6283 static int
6284 i40e_update_flow_control(struct i40e_hw *hw)
6285 {
6286 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6287         struct i40e_link_status link_status;
6288         uint32_t rxfc = 0, txfc = 0, reg;
6289         uint8_t an_info;
6290         int ret;
6291
6292         memset(&link_status, 0, sizeof(link_status));
6293         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6294         if (ret != I40E_SUCCESS) {
6295                 PMD_DRV_LOG(ERR, "Failed to get link status information");
6296                 goto write_reg; /* Disable flow control */
6297         }
6298
6299         an_info = hw->phy.link_info.an_info;
6300         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6301                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6302                 ret = I40E_ERR_NOT_READY;
6303                 goto write_reg; /* Disable flow control */
6304         }
6305         /**
6306          * If link auto negotiation is enabled, flow control needs to
6307          * be configured according to it
6308          */
6309         switch (an_info & I40E_LINK_PAUSE_RXTX) {
6310         case I40E_LINK_PAUSE_RXTX:
6311                 rxfc = 1;
6312                 txfc = 1;
6313                 hw->fc.current_mode = I40E_FC_FULL;
6314                 break;
6315         case I40E_AQ_LINK_PAUSE_RX:
6316                 rxfc = 1;
6317                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6318                 break;
6319         case I40E_AQ_LINK_PAUSE_TX:
6320                 txfc = 1;
6321                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6322                 break;
6323         default:
6324                 hw->fc.current_mode = I40E_FC_NONE;
6325                 break;
6326         }
6327
6328 write_reg:
6329         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6330                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6331         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6332         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6333         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6334         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6335
6336         return ret;
6337 }
6338
6339 /* PF setup */
6340 static int
6341 i40e_pf_setup(struct i40e_pf *pf)
6342 {
6343         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6344         struct i40e_filter_control_settings settings;
6345         struct i40e_vsi *vsi;
6346         int ret;
6347
6348         /* Clear all stats counters */
6349         pf->offset_loaded = FALSE;
6350         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6351         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6352         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6353         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6354
6355         ret = i40e_pf_get_switch_config(pf);
6356         if (ret != I40E_SUCCESS) {
6357                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6358                 return ret;
6359         }
6360
6361         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6362         if (ret)
6363                 PMD_INIT_LOG(WARNING,
6364                         "failed to allocate switch domain, err %d", ret);
6365
6366         if (pf->flags & I40E_FLAG_FDIR) {
6367                 /* Allocate the queue first, so FDIR gets queue pair 0 */
6368                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6369                 if (ret != I40E_FDIR_QUEUE_ID) {
6370                         PMD_DRV_LOG(ERR,
6371                                 "queue allocation failed for FDIR: ret=%d",
6372                                 ret);
6373                         pf->flags &= ~I40E_FLAG_FDIR;
6374                 }
6375         }
6376         /*  main VSI setup */
6377         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6378         if (!vsi) {
6379                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6380                 return I40E_ERR_NOT_READY;
6381         }
6382         pf->main_vsi = vsi;
6383
6384         /* Configure filter control */
6385         memset(&settings, 0, sizeof(settings));
6386         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6387                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6388         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6389                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6390         else {
6391                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6392                         hw->func_caps.rss_table_size);
6393                 return I40E_ERR_PARAM;
6394         }
6395         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6396                 hw->func_caps.rss_table_size);
6397         pf->hash_lut_size = hw->func_caps.rss_table_size;
6398
6399         /* Enable ethtype and macvlan filters */
6400         settings.enable_ethtype = TRUE;
6401         settings.enable_macvlan = TRUE;
6402         ret = i40e_set_filter_control(hw, &settings);
6403         if (ret)
6404                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6405                                                                 ret);
6406
6407         /* Update flow control according to the auto negotiation */
6408         i40e_update_flow_control(hw);
6409
6410         return I40E_SUCCESS;
6411 }
6412
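/*
 * i40e_switch_tx_queue - Enable or disable one TX queue. The QTX_ENA
 * register implements a request/status handshake: software sets or
 * clears QENA_REQ and then polls until hardware mirrors the value in
 * QENA_STAT (or the poll count runs out).
 */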
6413 int
6414 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6415 {
6416         uint32_t reg;
6417         uint16_t j;
6418
6419         /**
6420          * Set or clear TX Queue Disable flags,
6421          * which is required by hardware.
6422          */
6423         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6424         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6425
6426         /* Wait until the request is finished */
6427         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6428                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6429                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6430                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6431                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6432                                                         & 0x1))) {
6433                         break;
6434                 }
6435         }
6436         if (on) {
6437                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6438                         return I40E_SUCCESS; /* already on, skip next steps */
6439
6440                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6441                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6442         } else {
6443                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6444                         return I40E_SUCCESS; /* already off, skip next steps */
6445                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6446         }
6447         /* Write the register */
6448         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6449         /* Check the result */
6450         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6451                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6452                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6453                 if (on) {
6454                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6455                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6456                                 break;
6457                 } else {
6458                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6459                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6460                                 break;
6461                 }
6462         }
6463         /* Check whether it timed out */
6464         if (j >= I40E_CHK_Q_ENA_COUNT) {
6465                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6466                             (on ? "enable" : "disable"), q_idx);
6467                 return I40E_ERR_TIMEOUT;
6468         }
6469
6470         return I40E_SUCCESS;
6471 }
6472
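/*
 * i40e_switch_rx_queue - Same QENA_REQ/QENA_STAT handshake as the TX
 * path, applied to the QRX_ENA register.
 */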
6473 int
6474 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6475 {
6476         uint32_t reg;
6477         uint16_t j;
6478
6479         /* Wait until the request is finished */
6480         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6481                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6482                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6483                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6484                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6485                         break;
6486         }
6487
6488         if (on) {
6489                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6490                         return I40E_SUCCESS; /* Already on, skip next steps */
6491                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6492         } else {
6493                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6494                         return I40E_SUCCESS; /* Already off, skip next steps */
6495                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6496         }
6497
6498         /* Write the register */
6499         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6500         /* Check the result */
6501         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6502                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6503                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6504                 if (on) {
6505                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6506                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6507                                 break;
6508                 } else {
6509                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6510                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6511                                 break;
6512                 }
6513         }
6514
6515         /* Check if it is timeout */
6516         if (j >= I40E_CHK_Q_ENA_COUNT) {
6517                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6518                             (on ? "enable" : "disable"), q_idx);
6519                 return I40E_ERR_TIMEOUT;
6520         }
6521
6522         return I40E_SUCCESS;
6523 }
6524
6525 /* Initialize VSI for TX */
6526 static int
6527 i40e_dev_tx_init(struct i40e_pf *pf)
6528 {
6529         struct rte_eth_dev_data *data = pf->dev_data;
6530         uint16_t i;
6531         int ret = I40E_SUCCESS;
6532         struct i40e_tx_queue *txq;
6533
6534         for (i = 0; i < data->nb_tx_queues; i++) {
6535                 txq = data->tx_queues[i];
6536                 if (!txq || !txq->q_set)
6537                         continue;
6538                 ret = i40e_tx_queue_init(txq);
6539                 if (ret != I40E_SUCCESS)
6540                         break;
6541         }
6542         if (ret == I40E_SUCCESS)
6543                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6544                                      ->eth_dev);
6545
6546         return ret;
6547 }
6548
6549 /* Initialize VSI for RX */
6550 static int
6551 i40e_dev_rx_init(struct i40e_pf *pf)
6552 {
6553         struct rte_eth_dev_data *data = pf->dev_data;
6554         int ret = I40E_SUCCESS;
6555         uint16_t i;
6556         struct i40e_rx_queue *rxq;
6557
6558         i40e_pf_config_rss(pf);
6559         for (i = 0; i < data->nb_rx_queues; i++) {
6560                 rxq = data->rx_queues[i];
6561                 if (!rxq || !rxq->q_set)
6562                         continue;
6563
6564                 ret = i40e_rx_queue_init(rxq);
6565                 if (ret != I40E_SUCCESS) {
6566                         PMD_DRV_LOG(ERR,
6567                                 "Failed to do RX queue initialization");
6568                         break;
6569                 }
6570         }
6571         if (ret == I40E_SUCCESS)
6572                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6573                                      ->eth_dev);
6574
6575         return ret;
6576 }
6577
6578 static int
6579 i40e_dev_rxtx_init(struct i40e_pf *pf)
6580 {
6581         int err;
6582
6583         err = i40e_dev_tx_init(pf);
6584         if (err) {
6585                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6586                 return err;
6587         }
6588         err = i40e_dev_rx_init(pf);
6589         if (err) {
6590                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6591                 return err;
6592         }
6593
6594         return err;
6595 }
6596
6597 static int
6598 i40e_vmdq_setup(struct rte_eth_dev *dev)
6599 {
6600         struct rte_eth_conf *conf = &dev->data->dev_conf;
6601         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6602         int i, err, conf_vsis, j, loop;
6603         struct i40e_vsi *vsi;
6604         struct i40e_vmdq_info *vmdq_info;
6605         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6606         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6607
6608         /*
6609          * Disable interrupts to avoid messages from VFs. Furthermore, this
6610          * avoids race conditions during VSI creation/destruction.
6611          */
6612         i40e_pf_disable_irq0(hw);
6613
6614         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6615                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6616                 return -ENOTSUP;
6617         }
6618
6619         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6620         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6621                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support: %u",
6622                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6623                         pf->max_nb_vmdq_vsi);
6624                 return -ENOTSUP;
6625         }
6626
6627         if (pf->vmdq != NULL) {
6628                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6629                 return 0;
6630         }
6631
6632         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6633                                 sizeof(*vmdq_info) * conf_vsis, 0);
6634
6635         if (pf->vmdq == NULL) {
6636                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6637                 return -ENOMEM;
6638         }
6639
6640         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6641
6642         /* Create VMDQ VSI */
6643         for (i = 0; i < conf_vsis; i++) {
6644                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6645                                 vmdq_conf->enable_loop_back);
6646                 if (vsi == NULL) {
6647                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6648                         err = -1;
6649                         goto err_vsi_setup;
6650                 }
6651                 vmdq_info = &pf->vmdq[i];
6652                 vmdq_info->pf = pf;
6653                 vmdq_info->vsi = vsi;
6654         }
6655         pf->nb_cfg_vmdq_vsi = conf_vsis;
6656
6657         /* Configure VLANs: each pool map entry pairs a vlan_id with a bitmap of pools */
6658         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6659         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6660                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6661                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6662                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6663                                         vmdq_conf->pool_map[i].vlan_id, j);
6664
6665                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6666                                                 vmdq_conf->pool_map[i].vlan_id);
6667                                 if (err) {
6668                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6669                                         err = -1;
6670                                         goto err_vsi_setup;
6671                                 }
6672                         }
6673                 }
6674         }
6675
6676         i40e_pf_enable_irq0(hw);
6677
6678         return 0;
6679
6680 err_vsi_setup:
6681         for (i = 0; i < conf_vsis; i++) {
6682                 if (pf->vmdq[i].vsi == NULL)
6683                         break;
6684                 i40e_vsi_release(pf->vmdq[i].vsi);
6685         }
6686
6687         rte_free(pf->vmdq);
6688         pf->vmdq = NULL;
6689         i40e_pf_enable_irq0(hw);
6690         return err;
6691 }
6692
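/*
 * Hardware statistics registers are free-running counters with no
 * reset-on-read. The first reading is latched as the offset and every
 * later reading reports the delta, compensating for at most one wrap
 * of the 32-bit counter.
 */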
6693 static void
6694 i40e_stat_update_32(struct i40e_hw *hw,
6695                    uint32_t reg,
6696                    bool offset_loaded,
6697                    uint64_t *offset,
6698                    uint64_t *stat)
6699 {
6700         uint64_t new_data;
6701
6702         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6703         if (!offset_loaded)
6704                 *offset = new_data;
6705
6706         if (new_data >= *offset)
6707                 *stat = (uint64_t)(new_data - *offset);
6708         else
6709                 *stat = (uint64_t)((new_data +
6710                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6711 }
6712
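/*
 * 48-bit counters are split across two registers: the low 32 bits in
 * 'loreg' and the upper 16 bits in 'hireg'. Combine both halves, apply
 * the same wrap-compensating delta as the 32-bit case, and mask the
 * result back to 48 bits.
 */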
6713 static void
6714 i40e_stat_update_48(struct i40e_hw *hw,
6715                    uint32_t hireg,
6716                    uint32_t loreg,
6717                    bool offset_loaded,
6718                    uint64_t *offset,
6719                    uint64_t *stat)
6720 {
6721         uint64_t new_data;
6722
6723         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6724         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6725                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6726
6727         if (!offset_loaded)
6728                 *offset = new_data;
6729
6730         if (new_data >= *offset)
6731                 *stat = new_data - *offset;
6732         else
6733                 *stat = (uint64_t)((new_data +
6734                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6735
6736         *stat &= I40E_48_BIT_MASK;
6737 }
6738
6739 /* Disable IRQ0 */
6740 void
6741 i40e_pf_disable_irq0(struct i40e_hw *hw)
6742 {
6743         /* Disable all interrupt types */
6744         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6745                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6746         I40E_WRITE_FLUSH(hw);
6747 }
6748
6749 /* Enable IRQ0 */
6750 void
6751 i40e_pf_enable_irq0(struct i40e_hw *hw)
6752 {
6753         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6754                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6755                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6756                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6757         I40E_WRITE_FLUSH(hw);
6758 }
6759
6760 static void
6761 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6762 {
6763         /* read pending request and disable first */
6764         i40e_pf_disable_irq0(hw);
6765         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6766         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6767                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6768
6769         if (no_queue)
6770                 /* Link no queues with irq0 */
6771                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6772                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6773 }
6774
6775 static void
6776 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6777 {
6778         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6779         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6780         int i;
6781         uint16_t abs_vf_id;
6782         uint32_t index, offset, val;
6783
6784         if (!pf->vfs)
6785                 return;
6786         /**
6787          * Try to find which VF triggered a reset; use the absolute VF id to
6788          * access it, since the register is a global register.
6789          */
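        /*
         * Each GLGEN_VFLRSTAT register tracks 32 VFs; e.g. abs_vf_id 73
         * maps to register index 2 (73 / 32) and bit offset 9 (73 % 32).
         */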
6790         for (i = 0; i < pf->vf_num; i++) {
6791                 abs_vf_id = hw->func_caps.vf_base_id + i;
6792                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6793                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6794                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6795                 /* VFR event occurred */
6796                 if (val & (0x1 << offset)) {
6797                         int ret;
6798
6799                         /* Clear the event first */
6800                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6801                                                         (0x1 << offset));
6802                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6803                         /**
6804                          * Only notify that a VF reset event occurred;
6805                          * don't trigger another SW reset.
6806                          */
6807                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6808                         if (ret != I40E_SUCCESS)
6809                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6810                 }
6811         }
6812 }
6813
6814 static void
6815 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6816 {
6817         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6818         int i;
6819
6820         for (i = 0; i < pf->vf_num; i++)
6821                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6822 }
6823
6824 static void
6825 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6826 {
6827         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6828         struct i40e_arq_event_info info;
6829         uint16_t pending, opcode;
6830         int ret;
6831
6832         info.buf_len = I40E_AQ_BUF_SZ;
6833         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6834         if (!info.msg_buf) {
6835                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6836                 return;
6837         }
6838
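        /*
         * i40e_clean_arq_element() reports the number of events still
         * queued through 'pending', so loop until the admin receive
         * queue has been fully drained.
         */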
6839         pending = 1;
6840         while (pending) {
6841                 ret = i40e_clean_arq_element(hw, &info, &pending);
6842
6843                 if (ret != I40E_SUCCESS) {
6844                         PMD_DRV_LOG(INFO,
6845                                 "Failed to read msg from AdminQ, aq_err: %u",
6846                                 hw->aq.asq_last_status);
6847                         break;
6848                 }
6849                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6850
6851                 switch (opcode) {
6852                 case i40e_aqc_opc_send_msg_to_pf:
6853                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6854                         i40e_pf_host_handle_vf_msg(dev,
6855                                         rte_le_to_cpu_16(info.desc.retval),
6856                                         rte_le_to_cpu_32(info.desc.cookie_high),
6857                                         rte_le_to_cpu_32(info.desc.cookie_low),
6858                                         info.msg_buf,
6859                                         info.msg_len);
6860                         break;
6861                 case i40e_aqc_opc_get_link_status:
6862                         ret = i40e_dev_link_update(dev, 0);
6863                         if (!ret)
6864                                 rte_eth_dev_callback_process(dev,
6865                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6866                         break;
6867                 default:
6868                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6869                                     opcode);
6870                         break;
6871                 }
6872         }
6873         rte_free(info.msg_buf);
6874 }
6875
6876 static void
6877 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6878 {
6879 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6880 #define I40E_MDD_CLEAR16 0xFFFF
6881         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6882         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6883         bool mdd_detected = false;
6884         struct i40e_pf_vf *vf;
6885         uint32_t reg;
6886         int i;
6887
6888         /* find what triggered the MDD event */
6889         reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6890         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6891                 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6892                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6893                 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6894                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6895                 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6896                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6897                 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6898                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6899                                         hw->func_caps.base_queue;
6900                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6901                         "queue %d PF number 0x%02x VF number 0x%02x device %s",
6902                                 event, queue, pf_num, vf_num, dev->data->name);
6903                 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6904                 mdd_detected = true;
6905         }
6906         reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6907         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6908                 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6909                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6910                 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6911                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6912                 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6913                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6914                                         hw->func_caps.base_queue;
6915
6916                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6917                                 "queue %d of function 0x%02x device %s",
6918                                         event, queue, func, dev->data->name);
6919                 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6920                 mdd_detected = true;
6921         }
6922
6923         if (mdd_detected) {
6924                 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6925                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6926                         I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6927                         PMD_DRV_LOG(WARNING, "TX driver issue detected on PF");
6928                 }
6929                 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6930                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6931                         I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6932                                         I40E_MDD_CLEAR16);
6933                         PMD_DRV_LOG(WARNING, "RX driver issue detected on PF");
6934                 }
6935         }
6936
6937         /* see if one of the VFs needs its hand slapped */
6938         for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6939                 vf = &pf->vfs[i];
6940                 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6941                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6942                         I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6943                                         I40E_MDD_CLEAR16);
6944                         vf->num_mdd_events++;
6945                         PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
6946                                         PRIu64 " times",
6947                                         i, vf->num_mdd_events);
6948                 }
6949
6950                 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6951                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6952                         I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6953                                         I40E_MDD_CLEAR16);
6954                         vf->num_mdd_events++;
6955                         PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
6956                                         PRIu64 " times",
6957                                         i, vf->num_mdd_events);
6958                 }
6959         }
6960 }
6961
6962 /**
6963  * Interrupt handler triggered by NIC for handling
6964  * specific interrupts.
6965  *
6966  * @param param
6967  *  The address of the parameter (struct rte_eth_dev *) registered before.
6968  *
6969  * @return
6970  *  void
6971  */
6974 static void
6975 i40e_dev_interrupt_handler(void *param)
6976 {
6977         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6978         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6979         uint32_t icr0;
6980
6981         /* Disable interrupt */
6982         i40e_pf_disable_irq0(hw);
6983
6984         /* read out interrupt causes */
6985         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6986
6987         /* No interrupt event indicated */
6988         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6989                 PMD_DRV_LOG(INFO, "No interrupt event");
6990                 goto done;
6991         }
6992         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6993                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6994         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6995                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6996                 i40e_handle_mdd_event(dev);
6997         }
6998         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6999                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
7000         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
7001                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
7002         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
7003                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7004         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7005                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
7006         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7007                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7008
7009         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7010                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7011                 i40e_dev_handle_vfr_event(dev);
7012         }
7013         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7014                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
7015                 i40e_dev_handle_aq_msg(dev);
7016         }
7017
7018 done:
7019         /* Enable interrupt */
7020         i40e_pf_enable_irq0(hw);
7021 }
7022
7023 static void
7024 i40e_dev_alarm_handler(void *param)
7025 {
7026         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
7027         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7028         uint32_t icr0;
7029
7030         /* Disable interrupt */
7031         i40e_pf_disable_irq0(hw);
7032
7033         /* read out interrupt causes */
7034         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
7035
7036         /* No interrupt event indicated */
7037         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
7038                 goto done;
7039         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
7040                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
7041         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
7042                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
7043                 i40e_handle_mdd_event(dev);
7044         }
7045         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
7046                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
7047         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
7048                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
7049         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
7050                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7051         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7052                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
7053         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7054                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7055
7056         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7057                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7058                 i40e_dev_handle_vfr_event(dev);
7059         }
7060         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7061                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
7062                 i40e_dev_handle_aq_msg(dev);
7063         }
7064
7065 done:
7066         /* Enable interrupt */
7067         i40e_pf_enable_irq0(hw);
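        /* Re-arm the periodic alarm so the interrupt causes keep being polled. */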
7068         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
7069                           i40e_dev_alarm_handler, dev);
7070 }
7071
7072 int
7073 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
7074                          struct i40e_macvlan_filter *filter,
7075                          int total)
7076 {
7077         int ele_num, ele_buff_size;
7078         int num, actual_num, i;
7079         uint16_t flags;
7080         int ret = I40E_SUCCESS;
7081         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7082         struct i40e_aqc_add_macvlan_element_data *req_list;
7083
7084         if (filter == NULL || total == 0)
7085                 return I40E_ERR_PARAM;
7086         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7087         ele_buff_size = hw->aq.asq_buf_size;
7088
7089         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
7090         if (req_list == NULL) {
7091                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
7092                 return I40E_ERR_NO_MEMORY;
7093         }
7094
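        /*
         * An admin queue buffer holds at most ele_num elements, so
         * program the filters in batches of up to ele_num until all
         * 'total' entries have been issued.
         */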
7095         num = 0;
7096         do {
7097                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7098                 memset(req_list, 0, ele_buff_size);
7099
7100                 for (i = 0; i < actual_num; i++) {
7101                         rte_memcpy(req_list[i].mac_addr,
7102                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7103                         req_list[i].vlan_tag =
7104                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7105
7106                         switch (filter[num + i].filter_type) {
7107                         case RTE_MAC_PERFECT_MATCH:
7108                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
7109                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7110                                 break;
7111                         case RTE_MACVLAN_PERFECT_MATCH:
7112                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7113                                 break;
7114                         case RTE_MAC_HASH_MATCH:
7115                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
7116                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7117                                 break;
7118                         case RTE_MACVLAN_HASH_MATCH:
7119                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7120                                 break;
7121                         default:
7122                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
7123                                 ret = I40E_ERR_PARAM;
7124                                 goto DONE;
7125                         }
7126
7127                         req_list[i].queue_number = 0;
7128
7129                         req_list[i].flags = rte_cpu_to_le_16(flags);
7130                 }
7131
7132                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7133                                                 actual_num, NULL);
7134                 if (ret != I40E_SUCCESS) {
7135                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7136                         goto DONE;
7137                 }
7138                 num += actual_num;
7139         } while (num < total);
7140
7141 DONE:
7142         rte_free(req_list);
7143         return ret;
7144 }
7145
7146 int
7147 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7148                             struct i40e_macvlan_filter *filter,
7149                             int total)
7150 {
7151         int ele_num, ele_buff_size;
7152         int num, actual_num, i;
7153         uint16_t flags;
7154         int ret = I40E_SUCCESS;
7155         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7156         struct i40e_aqc_remove_macvlan_element_data *req_list;
7157
7158         if (filter == NULL || total == 0)
7159                 return I40E_ERR_PARAM;
7160
7161         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7162         ele_buff_size = hw->aq.asq_buf_size;
7163
7164         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7165         if (req_list == NULL) {
7166                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
7167                 return I40E_ERR_NO_MEMORY;
7168         }
7169
7170         num = 0;
7171         do {
7172                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7173                 memset(req_list, 0, ele_buff_size);
7174
7175                 for (i = 0; i < actual_num; i++) {
7176                         rte_memcpy(req_list[i].mac_addr,
7177                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7178                         req_list[i].vlan_tag =
7179                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7180
7181                         switch (filter[num + i].filter_type) {
7182                         case RTE_MAC_PERFECT_MATCH:
7183                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7184                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7185                                 break;
7186                         case RTE_MACVLAN_PERFECT_MATCH:
7187                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7188                                 break;
7189                         case RTE_MAC_HASH_MATCH:
7190                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7191                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7192                                 break;
7193                         case RTE_MACVLAN_HASH_MATCH:
7194                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7195                                 break;
7196                         default:
7197                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7198                                 ret = I40E_ERR_PARAM;
7199                                 goto DONE;
7200                         }
7201                         req_list[i].flags = rte_cpu_to_le_16(flags);
7202                 }
7203
7204                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7205                                                 actual_num, NULL);
7206                 if (ret != I40E_SUCCESS) {
7207                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7208                         goto DONE;
7209                 }
7210                 num += actual_num;
7211         } while (num < total);
7212
7213 DONE:
7214         rte_free(req_list);
7215         return ret;
7216 }
7217
7218 /* Find a specific MAC filter */
7219 static struct i40e_mac_filter *
7220 i40e_find_mac_filter(struct i40e_vsi *vsi,
7221                          struct rte_ether_addr *macaddr)
7222 {
7223         struct i40e_mac_filter *f;
7224
7225         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7226                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7227                         return f;
7228         }
7229
7230         return NULL;
7231 }
7232
7233 static bool
7234 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7235                          uint16_t vlan_id)
7236 {
7237         uint32_t vid_idx, vid_bit;
7238
7239         if (vlan_id > ETH_VLAN_ID_MAX)
7240                 return 0;
7241
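        /*
         * The software VLAN table (vfta) is a bitmap over all 4096 VLAN
         * ids: vid_idx selects the 32-bit word and vid_bit the bit
         * within that word.
         */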
7242         vid_idx = I40E_VFTA_IDX(vlan_id);
7243         vid_bit = I40E_VFTA_BIT(vlan_id);
7244
7245         if (vsi->vfta[vid_idx] & vid_bit)
7246                 return 1;
7247         else
7248                 return 0;
7249 }
7250
7251 static void
7252 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7253                        uint16_t vlan_id, bool on)
7254 {
7255         uint32_t vid_idx, vid_bit;
7256
7257         vid_idx = I40E_VFTA_IDX(vlan_id);
7258         vid_bit = I40E_VFTA_BIT(vlan_id);
7259
7260         if (on)
7261                 vsi->vfta[vid_idx] |= vid_bit;
7262         else
7263                 vsi->vfta[vid_idx] &= ~vid_bit;
7264 }
7265
7266 void
7267 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7268                      uint16_t vlan_id, bool on)
7269 {
7270         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7271         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7272         int ret;
7273
7274         if (vlan_id > ETH_VLAN_ID_MAX)
7275                 return;
7276
7277         i40e_store_vlan_filter(vsi, vlan_id, on);
7278
7279         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7280                 return;
7281
7282         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7283
7284         if (on) {
7285                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7286                                        &vlan_data, 1, NULL);
7287                 if (ret != I40E_SUCCESS)
7288                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7289         } else {
7290                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7291                                           &vlan_data, 1, NULL);
7292                 if (ret != I40E_SUCCESS)
7293                         PMD_DRV_LOG(ERR,
7294                                     "Failed to remove vlan filter");
7295         }
7296 }
7297
7298 /**
7299  * Find all vlan options for specific mac addr,
7300  * return with actual vlan found.
7301  */
7302 int
7303 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7304                            struct i40e_macvlan_filter *mv_f,
7305                            int num, struct rte_ether_addr *addr)
7306 {
7307         int i;
7308         uint32_t j, k;
7309
7310         /**
7311          * Avoid calling i40e_find_vlan_filter() here to reduce the loop time,
7312          * although the code looks more complex.
7313          */
7314         if (num < vsi->vlan_num)
7315                 return I40E_ERR_PARAM;
7316
7317         i = 0;
7318         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7319                 if (vsi->vfta[j]) {
7320                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7321                                 if (vsi->vfta[j] & (1 << k)) {
7322                                         if (i > num - 1) {
7323                                                 PMD_DRV_LOG(ERR,
7324                                                         "vlan number doesn't match");
7325                                                 return I40E_ERR_PARAM;
7326                                         }
7327                                         rte_memcpy(&mv_f[i].macaddr,
7328                                                         addr, ETH_ADDR_LEN);
7329                                         mv_f[i].vlan_id =
7330                                                 j * I40E_UINT32_BIT_SIZE + k;
7331                                         i++;
7332                                 }
7333                         }
7334                 }
7335         }
7336         return I40E_SUCCESS;
7337 }
7338
7339 static inline int
7340 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7341                            struct i40e_macvlan_filter *mv_f,
7342                            int num,
7343                            uint16_t vlan)
7344 {
7345         int i = 0;
7346         struct i40e_mac_filter *f;
7347
7348         if (num < vsi->mac_num)
7349                 return I40E_ERR_PARAM;
7350
7351         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7352                 if (i > num - 1) {
7353                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
7354                         return I40E_ERR_PARAM;
7355                 }
7356                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7357                                 ETH_ADDR_LEN);
7358                 mv_f[i].vlan_id = vlan;
7359                 mv_f[i].filter_type = f->mac_info.filter_type;
7360                 i++;
7361         }
7362
7363         return I40E_SUCCESS;
7364 }
7365
7366 static int
7367 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7368 {
7369         int i, j, num;
7370         struct i40e_mac_filter *f;
7371         struct i40e_macvlan_filter *mv_f;
7372         int ret = I40E_SUCCESS;
7373
7374         if (vsi == NULL || vsi->mac_num == 0)
7375                 return I40E_ERR_PARAM;
7376
7377         /* Case where no vlan is set */
7378         if (vsi->vlan_num == 0)
7379                 num = vsi->mac_num;
7380         else
7381                 num = vsi->mac_num * vsi->vlan_num;
7382
7383         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7384         if (mv_f == NULL) {
7385                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7386                 return I40E_ERR_NO_MEMORY;
7387         }
7388
7389         i = 0;
7390         if (vsi->vlan_num == 0) {
7391                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7392                         rte_memcpy(&mv_f[i].macaddr,
7393                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7394                         mv_f[i].filter_type = f->mac_info.filter_type;
7395                         mv_f[i].vlan_id = 0;
7396                         i++;
7397                 }
7398         } else {
7399                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7400                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7401                                         vsi->vlan_num, &f->mac_info.mac_addr);
7402                         if (ret != I40E_SUCCESS)
7403                                 goto DONE;
7404                         for (j = i; j < i + vsi->vlan_num; j++)
7405                                 mv_f[j].filter_type = f->mac_info.filter_type;
7406                         i += vsi->vlan_num;
7407                 }
7408         }
7409
7410         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7411 DONE:
7412         rte_free(mv_f);
7413
7414         return ret;
7415 }
7416
7417 int
7418 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7419 {
7420         struct i40e_macvlan_filter *mv_f;
7421         int mac_num;
7422         int ret = I40E_SUCCESS;
7423
7424         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7425                 return I40E_ERR_PARAM;
7426
7427         /* If it's already set, just return */
7428         if (i40e_find_vlan_filter(vsi, vlan))
7429                 return I40E_SUCCESS;
7430
7431         mac_num = vsi->mac_num;
7432
7433         if (mac_num == 0) {
7434                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7435                 return I40E_ERR_PARAM;
7436         }
7437
7438         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7439
7440         if (mv_f == NULL) {
7441                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7442                 return I40E_ERR_NO_MEMORY;
7443         }
7444
7445         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7446
7447         if (ret != I40E_SUCCESS)
7448                 goto DONE;
7449
7450         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7451
7452         if (ret != I40E_SUCCESS)
7453                 goto DONE;
7454
7455         i40e_set_vlan_filter(vsi, vlan, 1);
7456
7457         vsi->vlan_num++;
7458         ret = I40E_SUCCESS;
7459 DONE:
7460         rte_free(mv_f);
7461         return ret;
7462 }
7463
7464 int
7465 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7466 {
7467         struct i40e_macvlan_filter *mv_f;
7468         int mac_num;
7469         int ret = I40E_SUCCESS;
7470
7471         /**
7472          * Vlan 0 is the generic filter for untagged packets
7473          * and can't be removed.
7474          */
7475         if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7476                 return I40E_ERR_PARAM;
7477
7478         /* If it can't be found, just return */
7479         if (!i40e_find_vlan_filter(vsi, vlan))
7480                 return I40E_ERR_PARAM;
7481
7482         mac_num = vsi->mac_num;
7483
7484         if (mac_num == 0) {
7485                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7486                 return I40E_ERR_PARAM;
7487         }
7488
7489         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7490
7491         if (mv_f == NULL) {
7492                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7493                 return I40E_ERR_NO_MEMORY;
7494         }
7495
7496         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7497
7498         if (ret != I40E_SUCCESS)
7499                 goto DONE;
7500
7501         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7502
7503         if (ret != I40E_SUCCESS)
7504                 goto DONE;
7505
7506         /* This is last vlan to remove, replace all mac filter with vlan 0 */
7507         if (vsi->vlan_num == 1) {
7508                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7509                 if (ret != I40E_SUCCESS)
7510                         goto DONE;
7511
7512                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7513                 if (ret != I40E_SUCCESS)
7514                         goto DONE;
7515         }
7516
7517         i40e_set_vlan_filter(vsi, vlan, 0);
7518
7519         vsi->vlan_num--;
7520         ret = I40E_SUCCESS;
7521 DONE:
7522         rte_free(mv_f);
7523         return ret;
7524 }
7525
7526 int
7527 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7528 {
7529         struct i40e_mac_filter *f;
7530         struct i40e_macvlan_filter *mv_f;
7531         int i, vlan_num = 0;
7532         int ret = I40E_SUCCESS;
7533
7534         /* If it has already been configured, just return */
7535         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7536         if (f != NULL)
7537                 return I40E_SUCCESS;
7538         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
7539                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
7540
7541                 /**
7542                  * If vlan_num is 0, this is the first time to add a mac;
7543                  * set the filter for vlan_id 0.
7544                  */
7545                 if (vsi->vlan_num == 0) {
7546                         i40e_set_vlan_filter(vsi, 0, 1);
7547                         vsi->vlan_num = 1;
7548                 }
7549                 vlan_num = vsi->vlan_num;
7550         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
7551                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
7552                 vlan_num = 1;
7553
7554         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7555         if (mv_f == NULL) {
7556                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7557                 return I40E_ERR_NO_MEMORY;
7558         }
7559
7560         for (i = 0; i < vlan_num; i++) {
7561                 mv_f[i].filter_type = mac_filter->filter_type;
7562                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7563                                 ETH_ADDR_LEN);
7564         }
7565
7566         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7567                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
7568                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7569                                         &mac_filter->mac_addr);
7570                 if (ret != I40E_SUCCESS)
7571                         goto DONE;
7572         }
7573
7574         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7575         if (ret != I40E_SUCCESS)
7576                 goto DONE;
7577
7578         /* Add the mac addr into mac list */
7579         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7580         if (f == NULL) {
7581                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7582                 ret = I40E_ERR_NO_MEMORY;
7583                 goto DONE;
7584         }
7585         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7586                         ETH_ADDR_LEN);
7587         f->mac_info.filter_type = mac_filter->filter_type;
7588         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7589         vsi->mac_num++;
7590
7591         ret = I40E_SUCCESS;
7592 DONE:
7593         rte_free(mv_f);
7594
7595         return ret;
7596 }
7597
7598 int
7599 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7600 {
7601         struct i40e_mac_filter *f;
7602         struct i40e_macvlan_filter *mv_f;
7603         int i, vlan_num;
7604         enum rte_mac_filter_type filter_type;
7605         int ret = I40E_SUCCESS;
7606
7607         /* If it can't be found, return an error */
7608         f = i40e_find_mac_filter(vsi, addr);
7609         if (f == NULL)
7610                 return I40E_ERR_PARAM;
7611
7612         vlan_num = vsi->vlan_num;
7613         filter_type = f->mac_info.filter_type;
7614         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7615                 filter_type == RTE_MACVLAN_HASH_MATCH) {
7616                 if (vlan_num == 0) {
7617                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7618                         return I40E_ERR_PARAM;
7619                 }
7620         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7621                         filter_type == RTE_MAC_HASH_MATCH)
7622                 vlan_num = 1;
7623
7624         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7625         if (mv_f == NULL) {
7626                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7627                 return I40E_ERR_NO_MEMORY;
7628         }
7629
7630         for (i = 0; i < vlan_num; i++) {
7631                 mv_f[i].filter_type = filter_type;
7632                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7633                                 ETH_ADDR_LEN);
7634         }
7635         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7636                         filter_type == RTE_MACVLAN_HASH_MATCH) {
7637                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7638                 if (ret != I40E_SUCCESS)
7639                         goto DONE;
7640         }
7641
7642         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7643         if (ret != I40E_SUCCESS)
7644                 goto DONE;
7645
7646         /* Remove the mac addr from the mac list */
7647         TAILQ_REMOVE(&vsi->mac_list, f, next);
7648         rte_free(f);
7649         vsi->mac_num--;
7650
7651         ret = I40E_SUCCESS;
7652 DONE:
7653         rte_free(mv_f);
7654         return ret;
7655 }
7656
7657 /* Configure hash enable flags for RSS */
7658 uint64_t
7659 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7660 {
7661         uint64_t hena = 0;
7662         int i;
7663
7664         if (!flags)
7665                 return hena;
7666
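        /*
         * Translate each enabled flow type into its hardware packet
         * classifier (pctype) bits; the OR of all entries forms the
         * HENA value written to the hash enable registers.
         */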
7667         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7668                 if (flags & (1ULL << i))
7669                         hena |= adapter->pctypes_tbl[i];
7670         }
7671
7672         return hena;
7673 }
7674
7675 /* Parse the hash enable flags */
7676 uint64_t
7677 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7678 {
7679         uint64_t rss_hf = 0;
7680         int i;
7681
7682         if (!flags)
7683                 return rss_hf;
7684
7685         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7686                 if (flags & adapter->pctypes_tbl[i])
7687                         rss_hf |= (1ULL << i);
7688         }
7689         return rss_hf;
7690 }
7691
7692 /* Disable RSS */
7693 static void
7694 i40e_pf_disable_rss(struct i40e_pf *pf)
7695 {
7696         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7697
7698         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7699         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7700         I40E_WRITE_FLUSH(hw);
7701 }
7702
7703 int
7704 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7705 {
7706         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7707         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7708         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7709                            I40E_VFQF_HKEY_MAX_INDEX :
7710                            I40E_PFQF_HKEY_MAX_INDEX;
7711         int ret = 0;
7712
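        /*
         * The hardware hash key is (key_idx + 1) 32-bit words; with a
         * max index of 12 for both PF and VF that is a 52-byte key, and
         * any other length is rejected.
         */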
7713         if (!key || key_len == 0) {
7714                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7715                 return 0;
7716         } else if (key_len != (key_idx + 1) *
7717                 sizeof(uint32_t)) {
7718                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7719                 return -EINVAL;
7720         }
7721
7722         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7723                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7724                         (struct i40e_aqc_get_set_rss_key_data *)key;
7725
7726                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7727                 if (ret)
7728                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7729         } else {
7730                 uint32_t *hash_key = (uint32_t *)key;
7731                 uint16_t i;
7732
7733                 if (vsi->type == I40E_VSI_SRIOV) {
7734                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7735                                 I40E_WRITE_REG(
7736                                         hw,
7737                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7738                                         hash_key[i]);
7739
7740                 } else {
7741                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7742                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7743                                                hash_key[i]);
7744                 }
7745                 I40E_WRITE_FLUSH(hw);
7746         }
7747
7748         return ret;
7749 }
7750
7751 static int
7752 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7753 {
7754         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7755         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7756         uint32_t reg;
7757         int ret;
7758
7759         if (!key || !key_len)
7760                 return 0;
7761
7762         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7763                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7764                         (struct i40e_aqc_get_set_rss_key_data *)key);
7765                 if (ret) {
7766                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7767                         return ret;
7768                 }
7769         } else {
7770                 uint32_t *key_dw = (uint32_t *)key;
7771                 uint16_t i;
7772
7773                 if (vsi->type == I40E_VSI_SRIOV) {
7774                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7775                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7776                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7777                         }
7778                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7779                                    sizeof(uint32_t);
7780                 } else {
7781                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7782                                 reg = I40E_PFQF_HKEY(i);
7783                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7784                         }
7785                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7786                                    sizeof(uint32_t);
7787                 }
7788         }
7789         return 0;
7790 }
7791
7792 static int
7793 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7794 {
7795         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7796         uint64_t hena;
7797         int ret;
7798
7799         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7800                                rss_conf->rss_key_len);
7801         if (ret)
7802                 return ret;
7803
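        /*
         * HENA is a 64-bit mask split across two 32-bit registers:
         * PFQF_HENA(0) takes the low word and PFQF_HENA(1) the high word.
         */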
7804         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7805         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7806         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7807         I40E_WRITE_FLUSH(hw);
7808
7809         return 0;
7810 }
7811
7812 static int
7813 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7814                          struct rte_eth_rss_conf *rss_conf)
7815 {
7816         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7817         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7818         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7819         uint64_t hena;
7820
7821         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7822         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7823
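        /*
         * The RSS enabled state follows the rx mq_mode chosen at
         * configure time; this callback only updates the hash options,
         * so reject any request that would flip the enabled state.
         */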
7824         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7825                 if (rss_hf != 0) /* Enable RSS */
7826                         return -EINVAL;
7827                 return 0; /* Nothing to do */
7828         }
7829         /* RSS enabled */
7830         if (rss_hf == 0) /* Disable RSS */
7831                 return -EINVAL;
7832
7833         return i40e_hw_rss_hash_set(pf, rss_conf);
7834 }
7835
7836 static int
7837 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7838                            struct rte_eth_rss_conf *rss_conf)
7839 {
7840         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7841         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7842         uint64_t hena;
7843         int ret;
7844
7845         if (!rss_conf)
7846                 return -EINVAL;
7847
7848         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7849                          &rss_conf->rss_key_len);
7850         if (ret)
7851                 return ret;
7852
7853         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7854         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7855         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7856
7857         return 0;
7858 }
7859
7860 static int
7861 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7862 {
7863         switch (filter_type) {
7864         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7865                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7866                 break;
7867         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7868                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7869                 break;
7870         case RTE_TUNNEL_FILTER_IMAC_TENID:
7871                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7872                 break;
7873         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7874                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7875                 break;
7876         case ETH_TUNNEL_FILTER_IMAC:
7877                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7878                 break;
7879         case ETH_TUNNEL_FILTER_OIP:
7880                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7881                 break;
7882         case ETH_TUNNEL_FILTER_IIP:
7883                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7884                 break;
7885         default:
7886                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7887                 return -EINVAL;
7888         }
7889
7890         return 0;
7891 }
7892
7893 /* Convert tunnel filter structure */
7894 static int
7895 i40e_tunnel_filter_convert(
7896         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7897         struct i40e_tunnel_filter *tunnel_filter)
7898 {
7899         rte_ether_addr_copy((struct rte_ether_addr *)
7900                         &cld_filter->element.outer_mac,
7901                 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7902         rte_ether_addr_copy((struct rte_ether_addr *)
7903                         &cld_filter->element.inner_mac,
7904                 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7905         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7906         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7907              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7908             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7909                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7910         else
7911                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7912         tunnel_filter->input.flags = cld_filter->element.flags;
7913         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7914         tunnel_filter->queue = cld_filter->element.queue_number;
7915         rte_memcpy(tunnel_filter->input.general_fields,
7916                    cld_filter->general_fields,
7917                    sizeof(cld_filter->general_fields));
7918
7919         return 0;
7920 }
7921
7922 /* Check if the tunnel filter exists */
7923 struct i40e_tunnel_filter *
7924 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7925                              const struct i40e_tunnel_filter_input *input)
7926 {
7927         int ret;
7928
7929         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7930         if (ret < 0)
7931                 return NULL;
7932
7933         return tunnel_rule->hash_map[ret];
7934 }
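
/*
 * Note on the lookup above: rte_hash_lookup() returns the key's slot
 * index (>= 0) on a hit, and that same index is used as the position in
 * the hash_map[] shadow array, so the map and the hash table stay in
 * sync by construction.
 */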
7935
7936 /* Add a tunnel filter into the SW list */
7937 static int
7938 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7939                              struct i40e_tunnel_filter *tunnel_filter)
7940 {
7941         struct i40e_tunnel_rule *rule = &pf->tunnel;
7942         int ret;
7943
7944         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7945         if (ret < 0) {
7946                 PMD_DRV_LOG(ERR,
7947                             "Failed to insert tunnel filter into hash table: %d!",
7948                             ret);
7949                 return ret;
7950         }
7951         rule->hash_map[ret] = tunnel_filter;
7952
7953         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7954
7955         return 0;
7956 }
7957
7958 /* Delete a tunnel filter from the SW list */
7959 int
7960 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7961                           struct i40e_tunnel_filter_input *input)
7962 {
7963         struct i40e_tunnel_rule *rule = &pf->tunnel;
7964         struct i40e_tunnel_filter *tunnel_filter;
7965         int ret;
7966
7967         ret = rte_hash_del_key(rule->hash_table, input);
7968         if (ret < 0) {
7969                 PMD_DRV_LOG(ERR,
7970                             "Failed to delete tunnel filter from hash table: %d!",
7971                             ret);
7972                 return ret;
7973         }
7974         tunnel_filter = rule->hash_map[ret];
7975         rule->hash_map[ret] = NULL;
7976
7977         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7978         rte_free(tunnel_filter);
7979
7980         return 0;
7981 }
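
/*
 * The three helpers above implement the usual software-shadow pattern
 * for hardware filters: an rte_hash keyed by the filter input for O(1)
 * duplicate detection, a hash_map[] array translating slot index to
 * filter object, and a TAILQ for ordered traversal (e.g. when flushing
 * or restoring filters).
 */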
7982
7983 int
7984 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7985                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7986                         uint8_t add)
7987 {
7988         uint16_t ip_type;
7989         uint32_t ipv4_addr, ipv4_addr_le;
7990         uint8_t i, tun_type = 0;
7991         /* internal variable to convert ipv6 byte order */
7992         uint32_t convert_ipv6[4];
7993         int val, ret = 0;
7994         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7995         struct i40e_vsi *vsi = pf->main_vsi;
7996         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
7997         struct i40e_aqc_cloud_filters_element_bb *pfilter;
7998         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7999         struct i40e_tunnel_filter *tunnel, *node;
8000         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8001
8002         cld_filter = rte_zmalloc("tunnel_filter",
8003                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8004                          0);
8005
8006         if (cld_filter == NULL) {
8007                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8008                 return -ENOMEM;
8009         }
8010         pfilter = cld_filter;
8011
8012         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8013                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8014         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8015                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8016
8017         pfilter->element.inner_vlan =
8018                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8019         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
8020                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8021                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8022                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8023                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8024                                 &ipv4_addr_le,
8025                                 sizeof(pfilter->element.ipaddr.v4.data));
8026         } else {
8027                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8028                 for (i = 0; i < 4; i++) {
8029                         convert_ipv6[i] =
8030                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
8031                 }
8032                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8033                            &convert_ipv6,
8034                            sizeof(pfilter->element.ipaddr.v6.data));
8035         }
8036
8037         /* check tunneled type */
8038         switch (tunnel_filter->tunnel_type) {
8039         case RTE_TUNNEL_TYPE_VXLAN:
8040                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8041                 break;
8042         case RTE_TUNNEL_TYPE_NVGRE:
8043                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8044                 break;
8045         case RTE_TUNNEL_TYPE_IP_IN_GRE:
8046                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8047                 break;
8048         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8049                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
8050                 break;
8051         default:
8052                 /* Other tunnel types are not supported. */
8053                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8054                 rte_free(cld_filter);
8055                 return -EINVAL;
8056         }
8057
8058         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8059                                        &pfilter->element.flags);
8060         if (val < 0) {
8061                 rte_free(cld_filter);
8062                 return -EINVAL;
8063         }
8064
8065         pfilter->element.flags |= rte_cpu_to_le_16(
8066                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8067                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8068         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8069         pfilter->element.queue_number =
8070                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8071
8072         /* Check if the filter exists in the SW list */
8073         memset(&check_filter, 0, sizeof(check_filter));
8074         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8075         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8076         if (add && node) {
8077                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8078                 rte_free(cld_filter);
8079                 return -EINVAL;
8080         }
8081
8082         if (!add && !node) {
8083                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8084                 rte_free(cld_filter);
8085                 return -EINVAL;
8086         }
8087
8088         if (add) {
8089                 ret = i40e_aq_add_cloud_filters(hw,
8090                                         vsi->seid, &cld_filter->element, 1);
8091                 if (ret < 0) {
8092                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8093                         rte_free(cld_filter);
8094                         return -ENOTSUP;
8095                 }
8096                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8097                 if (tunnel == NULL) {
8098                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8099                         rte_free(cld_filter);
8100                         return -ENOMEM;
8101                 }
8102
8103                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8104                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8105                 if (ret < 0)
8106                         rte_free(tunnel);
8107         } else {
8108                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8109                                                    &cld_filter->element, 1);
8110                 if (ret < 0) {
8111                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8112                         rte_free(cld_filter);
8113                         return -ENOTSUP;
8114                 }
8115                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8116         }
8117
8118         rte_free(cld_filter);
8119         return ret;
8120 }
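
/*
 * Illustrative caller sketch (an assumption, not existing driver code):
 * a VXLAN tunnel filter steering tenant 42 to RX queue 3 could be
 * installed roughly as follows, with conf.inner_mac and conf.ip_addr
 * filled in as required by the chosen filter_type:
 *
 *     struct rte_eth_tunnel_filter_conf conf = {
 *             .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *             .filter_type = RTE_TUNNEL_FILTER_IMAC_TENID,
 *             .tenant_id = 42,
 *             .queue_id = 3,
 *     };
 *
 *     ret = i40e_dev_tunnel_filter_set(pf, &conf, 1);
 *
 * Passing add == 0 with the same parameters removes the filter again.
 */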
8121
8122 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
8123 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
8124 #define I40E_TR_GENEVE_KEY_MASK                 0x8
8125 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
8126 #define I40E_TR_GRE_KEY_MASK                    0x400
8127 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
8128 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
8129 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
8130 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
8131 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
8132 #define I40E_DIRECTION_INGRESS_KEY              0x8000
8133 #define I40E_TR_L4_TYPE_TCP                     0x2
8134 #define I40E_TR_L4_TYPE_UDP                     0x4
8135 #define I40E_TR_L4_TYPE_SCTP                    0x8
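
/*
 * The masks above select bits inside the "translation" words of the
 * replace-cloud-filter command buffer used by the functions below. Each
 * pair of data[] bytes forms a 16-bit little-endian word, which is why
 * the GRE key masks (0x400/0x800/0x8000) are shifted right by 8 before
 * being stored in the high byte (e.g. data[11]).
 */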
8136
8137 static enum i40e_status_code
8138 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
8139 {
8140         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8141         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8142         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8143         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8144         enum i40e_status_code status = I40E_SUCCESS;
8145
8146         if (pf->support_multi_driver) {
8147                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8148                 return I40E_NOT_SUPPORTED;
8149         }
8150
8151         memset(&filter_replace, 0,
8152                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8153         memset(&filter_replace_buf, 0,
8154                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8155
8156         /* create L1 filter */
8157         filter_replace.old_filter_type =
8158                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8159         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8160         filter_replace.tr_bit = 0;
8161
8162         /* Prepare the buffer, 3 entries */
8163         filter_replace_buf.data[0] =
8164                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8165         filter_replace_buf.data[0] |=
8166                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8167         filter_replace_buf.data[2] = 0xFF;
8168         filter_replace_buf.data[3] = 0xFF;
8169         filter_replace_buf.data[4] =
8170                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8171         filter_replace_buf.data[4] |=
8172                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8173         filter_replace_buf.data[7] = 0xF0;
8174         filter_replace_buf.data[8] =
8175                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
8176         filter_replace_buf.data[8] |=
8177                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8178         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
8179                 I40E_TR_GENEVE_KEY_MASK |
8180                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
8181         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
8182                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
8183                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
8184
8185         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8186                                                &filter_replace_buf);
8187         if (!status && (filter_replace.old_filter_type !=
8188                         filter_replace.new_filter_type))
8189                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8190                             " original: 0x%x, new: 0x%x",
8191                             dev->device->name,
8192                             filter_replace.old_filter_type,
8193                             filter_replace.new_filter_type);
8194
8195         return status;
8196 }
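
/*
 * The replace functions in this block all follow the same recipe: name
 * an existing firmware filter type to give up (old_filter_type), ask
 * for a custom type in its place (new_filter_type, e.g. 0x11), and
 * describe the new field vector in filter_replace_buf with every used
 * entry tagged I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED. Because the
 * replacement changes a device-global table shared by all drivers on
 * the NIC, it is refused in multi-driver mode and a warning is logged
 * whenever a filter type actually changed.
 */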
8197
8198 static enum i40e_status_code
8199 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
8200 {
8201         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8202         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8203         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8204         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8205         enum i40e_status_code status = I40E_SUCCESS;
8206
8207         if (pf->support_multi_driver) {
8208                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8209                 return I40E_NOT_SUPPORTED;
8210         }
8211
8212         /* For MPLSoUDP */
8213         memset(&filter_replace, 0,
8214                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8215         memset(&filter_replace_buf, 0,
8216                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8217         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8218                 I40E_AQC_MIRROR_CLOUD_FILTER;
8219         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8220         filter_replace.new_filter_type =
8221                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8222         /* Prepare the buffer, 2 entries */
8223         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8224         filter_replace_buf.data[0] |=
8225                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8226         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8227         filter_replace_buf.data[4] |=
8228                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8229         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8230                                                &filter_replace_buf);
8231         if (status < 0)
8232                 return status;
8233         if (filter_replace.old_filter_type !=
8234             filter_replace.new_filter_type)
8235                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8236                             " original: 0x%x, new: 0x%x",
8237                             dev->device->name,
8238                             filter_replace.old_filter_type,
8239                             filter_replace.new_filter_type);
8240
8241         /* For MPLSoGRE */
8242         memset(&filter_replace, 0,
8243                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8244         memset(&filter_replace_buf, 0,
8245                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8246
8247         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8248                 I40E_AQC_MIRROR_CLOUD_FILTER;
8249         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
8250         filter_replace.new_filter_type =
8251                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8252         /* Prepare the buffer, 2 entries */
8253         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8254         filter_replace_buf.data[0] |=
8255                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8256         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8257         filter_replace_buf.data[4] |=
8258                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8259
8260         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8261                                                &filter_replace_buf);
8262         if (!status && (filter_replace.old_filter_type !=
8263                         filter_replace.new_filter_type))
8264                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8265                             " original: 0x%x, new: 0x%x",
8266                             dev->device->name,
8267                             filter_replace.old_filter_type,
8268                             filter_replace.new_filter_type);
8269
8270         return status;
8271 }
8272
8273 static enum i40e_status_code
8274 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8275 {
8276         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8277         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8278         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8279         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8280         enum i40e_status_code status = I40E_SUCCESS;
8281
8282         if (pf->support_multi_driver) {
8283                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8284                 return I40E_NOT_SUPPORTED;
8285         }
8286
8287         /* For GTP-C */
8288         memset(&filter_replace, 0,
8289                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8290         memset(&filter_replace_buf, 0,
8291                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8292         /* create L1 filter */
8293         filter_replace.old_filter_type =
8294                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8295         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8296         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8297                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8298         /* Prepare the buffer, 2 entries */
8299         filter_replace_buf.data[0] =
8300                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8301         filter_replace_buf.data[0] |=
8302                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8303         filter_replace_buf.data[2] = 0xFF;
8304         filter_replace_buf.data[3] = 0xFF;
8305         filter_replace_buf.data[4] =
8306                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8307         filter_replace_buf.data[4] |=
8308                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8309         filter_replace_buf.data[6] = 0xFF;
8310         filter_replace_buf.data[7] = 0xFF;
8311         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8312                                                &filter_replace_buf);
8313         if (status < 0)
8314                 return status;
8315         if (filter_replace.old_filter_type !=
8316             filter_replace.new_filter_type)
8317                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8318                             " original: 0x%x, new: 0x%x",
8319                             dev->device->name,
8320                             filter_replace.old_filter_type,
8321                             filter_replace.new_filter_type);
8322
8323         /* for GTP-U */
8324         memset(&filter_replace, 0,
8325                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8326         memset(&filter_replace_buf, 0,
8327                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8328         /* create L1 filter */
8329         filter_replace.old_filter_type =
8330                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8331         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8332         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8333                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8334         /* Prepare the buffer, 2 entries */
8335         filter_replace_buf.data[0] =
8336                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8337         filter_replace_buf.data[0] |=
8338                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8339         filter_replace_buf.data[2] = 0xFF;
8340         filter_replace_buf.data[3] = 0xFF;
8341         filter_replace_buf.data[4] =
8342                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8343         filter_replace_buf.data[4] |=
8344                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8345         filter_replace_buf.data[6] = 0xFF;
8346         filter_replace_buf.data[7] = 0xFF;
8347
8348         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8349                                                &filter_replace_buf);
8350         if (!status && (filter_replace.old_filter_type !=
8351                         filter_replace.new_filter_type))
8352                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8353                             " original: 0x%x, new: 0x%x",
8354                             dev->device->name,
8355                             filter_replace.old_filter_type,
8356                             filter_replace.new_filter_type);
8357
8358         return status;
8359 }
8360
8361 static enum i40e_status_code
8362 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8363 {
8364         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8365         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8366         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8367         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8368         enum i40e_status_code status = I40E_SUCCESS;
8369
8370         if (pf->support_multi_driver) {
8371                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8372                 return I40E_NOT_SUPPORTED;
8373         }
8374
8375         /* for GTP-C */
8376         memset(&filter_replace, 0,
8377                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8378         memset(&filter_replace_buf, 0,
8379                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8380         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8381         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8382         filter_replace.new_filter_type =
8383                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8384         /* Prepare the buffer, 2 entries */
8385         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8386         filter_replace_buf.data[0] |=
8387                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8388         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8389         filter_replace_buf.data[4] |=
8390                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8391         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8392                                                &filter_replace_buf);
8393         if (status < 0)
8394                 return status;
8395         if (filter_replace.old_filter_type !=
8396             filter_replace.new_filter_type)
8397                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8398                             " original: 0x%x, new: 0x%x",
8399                             dev->device->name,
8400                             filter_replace.old_filter_type,
8401                             filter_replace.new_filter_type);
8402
8403         /* for GTP-U */
8404         memset(&filter_replace, 0,
8405                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8406         memset(&filter_replace_buf, 0,
8407                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8408         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8409         filter_replace.old_filter_type =
8410                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8411         filter_replace.new_filter_type =
8412                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8413         /* Prepare the buffer, 2 entries */
8414         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8415         filter_replace_buf.data[0] |=
8416                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8417         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8418         filter_replace_buf.data[4] |=
8419                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8420
8421         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8422                                                &filter_replace_buf);
8423         if (!status && (filter_replace.old_filter_type !=
8424                         filter_replace.new_filter_type))
8425                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8426                             " original: 0x%x, new: 0x%x",
8427                             dev->device->name,
8428                             filter_replace.old_filter_type,
8429                             filter_replace.new_filter_type);
8430
8431         return status;
8432 }
8433
8434 static enum i40e_status_code
8435 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8436                             enum i40e_l4_port_type l4_port_type)
8437 {
8438         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8439         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8440         enum i40e_status_code status = I40E_SUCCESS;
8441         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8442         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8443
8444         if (pf->support_multi_driver) {
8445                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8446                 return I40E_NOT_SUPPORTED;
8447         }
8448
8449         memset(&filter_replace, 0,
8450                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8451         memset(&filter_replace_buf, 0,
8452                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8453
8454         /* create L1 filter */
8455         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8456                 filter_replace.old_filter_type =
8457                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8458                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8459                 filter_replace_buf.data[8] =
8460                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8461         } else {
8462                 filter_replace.old_filter_type =
8463                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8464                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8465                 filter_replace_buf.data[8] =
8466                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8467         }
8468
8469         filter_replace.tr_bit = 0;
8470         /* Prepare the buffer, 3 entries */
8471         filter_replace_buf.data[0] =
8472                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8473         filter_replace_buf.data[0] |=
8474                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8475         filter_replace_buf.data[2] = 0x00;
8476         filter_replace_buf.data[3] =
8477                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8478         filter_replace_buf.data[4] =
8479                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8480         filter_replace_buf.data[4] |=
8481                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8482         filter_replace_buf.data[5] = 0x00;
8483         filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8484                 I40E_TR_L4_TYPE_TCP |
8485                 I40E_TR_L4_TYPE_SCTP;
8486         filter_replace_buf.data[7] = 0x00;
8487         filter_replace_buf.data[8] |=
8488                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8489         filter_replace_buf.data[9] = 0x00;
8490         filter_replace_buf.data[10] = 0xFF;
8491         filter_replace_buf.data[11] = 0xFF;
8492
8493         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8494                                                &filter_replace_buf);
8495         if (!status && filter_replace.old_filter_type !=
8496             filter_replace.new_filter_type)
8497                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8498                             " original: 0x%x, new: 0x%x",
8499                             dev->device->name,
8500                             filter_replace.old_filter_type,
8501                             filter_replace.new_filter_type);
8502
8503         return status;
8504 }
8505
8506 static enum i40e_status_code
8507 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8508                                enum i40e_l4_port_type l4_port_type)
8509 {
8510         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8511         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8512         enum i40e_status_code status = I40E_SUCCESS;
8513         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8514         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8515
8516         if (pf->support_multi_driver) {
8517                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8518                 return I40E_NOT_SUPPORTED;
8519         }
8520
8521         memset(&filter_replace, 0,
8522                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8523         memset(&filter_replace_buf, 0,
8524                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8525
8526         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8527                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8528                 filter_replace.new_filter_type =
8529                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8530                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8531         } else {
8532                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8533                 filter_replace.new_filter_type =
8534                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8535                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8536         }
8537
8538         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8539         filter_replace.tr_bit = 0;
8540         /* Prepare the buffer, 2 entries */
8541         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8542         filter_replace_buf.data[0] |=
8543                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8544         filter_replace_buf.data[4] |=
8545                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8546         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8547                                                &filter_replace_buf);
8548
8549         if (!status && filter_replace.old_filter_type !=
8550             filter_replace.new_filter_type)
8551                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8552                             " original: 0x%x, new: 0x%x",
8553                             dev->device->name,
8554                             filter_replace.old_filter_type,
8555                             filter_replace.new_filter_type);
8556
8557         return status;
8558 }
8559
8560 int
8561 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8562                       struct i40e_tunnel_filter_conf *tunnel_filter,
8563                       uint8_t add)
8564 {
8565         uint16_t ip_type;
8566         uint32_t ipv4_addr, ipv4_addr_le;
8567         uint8_t i, tun_type = 0;
8568         /* internal variable to convert ipv6 byte order */
8569         uint32_t convert_ipv6[4];
8570         int val, ret = 0;
8571         struct i40e_pf_vf *vf = NULL;
8572         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8573         struct i40e_vsi *vsi;
8574         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8575         struct i40e_aqc_cloud_filters_element_bb *pfilter;
8576         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8577         struct i40e_tunnel_filter *tunnel, *node;
8578         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8579         uint32_t teid_le;
8580         bool big_buffer = 0;
8581
8582         cld_filter = rte_zmalloc("tunnel_filter",
8583                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8584                          0);
8585
8586         if (cld_filter == NULL) {
8587                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8588                 return -ENOMEM;
8589         }
8590         pfilter = cld_filter;
8591
8592         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8593                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8594         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8595                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8596
8597         pfilter->element.inner_vlan =
8598                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8599         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8600                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8601                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8602                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8603                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8604                                 &ipv4_addr_le,
8605                                 sizeof(pfilter->element.ipaddr.v4.data));
8606         } else {
8607                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8608                 for (i = 0; i < 4; i++) {
8609                         convert_ipv6[i] =
8610                         rte_cpu_to_le_32(rte_be_to_cpu_32(
8611                                          tunnel_filter->ip_addr.ipv6_addr[i]));
8612                 }
8613                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8614                            &convert_ipv6,
8615                            sizeof(pfilter->element.ipaddr.v6.data));
8616         }
8617
8618         /* check tunneled type */
8619         switch (tunnel_filter->tunnel_type) {
8620         case I40E_TUNNEL_TYPE_VXLAN:
8621                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8622                 break;
8623         case I40E_TUNNEL_TYPE_NVGRE:
8624                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8625                 break;
8626         case I40E_TUNNEL_TYPE_IP_IN_GRE:
8627                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8628                 break;
8629         case I40E_TUNNEL_TYPE_MPLSoUDP:
8630                 if (!pf->mpls_replace_flag) {
8631                         i40e_replace_mpls_l1_filter(pf);
8632                         i40e_replace_mpls_cloud_filter(pf);
8633                         pf->mpls_replace_flag = 1;
8634                 }
8635                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8636                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8637                         teid_le >> 4;
8638                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8639                         (teid_le & 0xF) << 12;
8640                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8641                         0x40;
8642                 big_buffer = 1;
8643                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8644                 break;
8645         case I40E_TUNNEL_TYPE_MPLSoGRE:
8646                 if (!pf->mpls_replace_flag) {
8647                         i40e_replace_mpls_l1_filter(pf);
8648                         i40e_replace_mpls_cloud_filter(pf);
8649                         pf->mpls_replace_flag = 1;
8650                 }
8651                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8652                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8653                         teid_le >> 4;
8654                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8655                         (teid_le & 0xF) << 12;
8656                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8657                         0x0;
8658                 big_buffer = 1;
8659                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8660                 break;
8661         case I40E_TUNNEL_TYPE_GTPC:
8662                 if (!pf->gtp_replace_flag) {
8663                         i40e_replace_gtp_l1_filter(pf);
8664                         i40e_replace_gtp_cloud_filter(pf);
8665                         pf->gtp_replace_flag = 1;
8666                 }
8667                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8668                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8669                         (teid_le >> 16) & 0xFFFF;
8670                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8671                         teid_le & 0xFFFF;
8672                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8673                         0x0;
8674                 big_buffer = 1;
8675                 break;
8676         case I40E_TUNNEL_TYPE_GTPU:
8677                 if (!pf->gtp_replace_flag) {
8678                         i40e_replace_gtp_l1_filter(pf);
8679                         i40e_replace_gtp_cloud_filter(pf);
8680                         pf->gtp_replace_flag = 1;
8681                 }
8682                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8683                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8684                         (teid_le >> 16) & 0xFFFF;
8685                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8686                         teid_le & 0xFFFF;
8687                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8688                         0x0;
8689                 big_buffer = 1;
8690                 break;
8691         case I40E_TUNNEL_TYPE_QINQ:
8692                 if (!pf->qinq_replace_flag) {
8693                         ret = i40e_cloud_filter_qinq_create(pf);
8694                         if (ret < 0)
8695                                 PMD_DRV_LOG(DEBUG,
8696                                             "QinQ tunnel filter already created.");
8697                         pf->qinq_replace_flag = 1;
8698                 }
8699                 /* Put the outer and inner VLAN values into the
8700                  * general fields; the big-buffer variant of the
8701                  * AQ command must be used here, see
8702                  * i40e_aq_add_cloud_filters_bb().
8703                  */
8704                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8705                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8706                 big_buffer = 1;
8707                 break;
8708         case I40E_CLOUD_TYPE_UDP:
8709         case I40E_CLOUD_TYPE_TCP:
8710         case I40E_CLOUD_TYPE_SCTP:
8711                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8712                         if (!pf->sport_replace_flag) {
8713                                 i40e_replace_port_l1_filter(pf,
8714                                                 tunnel_filter->l4_port_type);
8715                                 i40e_replace_port_cloud_filter(pf,
8716                                                 tunnel_filter->l4_port_type);
8717                                 pf->sport_replace_flag = 1;
8718                         }
8719                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8720                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8721                                 I40E_DIRECTION_INGRESS_KEY;
8722
8723                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8724                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8725                                         I40E_TR_L4_TYPE_UDP;
8726                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8727                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8728                                         I40E_TR_L4_TYPE_TCP;
8729                         else
8730                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8731                                         I40E_TR_L4_TYPE_SCTP;
8732
8733                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8734                                 (teid_le >> 16) & 0xFFFF;
8735                         big_buffer = 1;
8736                 } else {
8737                         if (!pf->dport_replace_flag) {
8738                                 i40e_replace_port_l1_filter(pf,
8739                                                 tunnel_filter->l4_port_type);
8740                                 i40e_replace_port_cloud_filter(pf,
8741                                                 tunnel_filter->l4_port_type);
8742                                 pf->dport_replace_flag = 1;
8743                         }
8744                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8745                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8746                                 I40E_DIRECTION_INGRESS_KEY;
8747
8748                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8749                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8750                                         I40E_TR_L4_TYPE_UDP;
8751                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8752                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8753                                         I40E_TR_L4_TYPE_TCP;
8754                         else
8755                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8756                                         I40E_TR_L4_TYPE_SCTP;
8757
8758                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8759                                 (teid_le >> 16) & 0xFFFF;
8760                         big_buffer = 1;
8761                 }
8762
8763                 break;
8764         default:
8765                 /* Other tunnel types are not supported. */
8766                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8767                 rte_free(cld_filter);
8768                 return -EINVAL;
8769         }
8770
8771         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8772                 pfilter->element.flags =
8773                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8774         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8775                 pfilter->element.flags =
8776                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8777         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8778                 pfilter->element.flags =
8779                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8780         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8781                 pfilter->element.flags =
8782                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8783         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8784                 pfilter->element.flags |=
8785                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8786         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8787                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8788                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8789                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8790                         pfilter->element.flags |=
8791                                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8792                 else
8793                         pfilter->element.flags |=
8794                                 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8795         } else {
8796                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8797                                                 &pfilter->element.flags);
8798                 if (val < 0) {
8799                         rte_free(cld_filter);
8800                         return -EINVAL;
8801                 }
8802         }
8803
8804         pfilter->element.flags |= rte_cpu_to_le_16(
8805                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8806                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8807         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8808         pfilter->element.queue_number =
8809                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8810
8811         if (!tunnel_filter->is_to_vf) {
8812                 vsi = pf->main_vsi;
8813         } else {
8814                 if (tunnel_filter->vf_id >= pf->vf_num) {
8815                         PMD_DRV_LOG(ERR, "Invalid argument.");
8816                         rte_free(cld_filter);
8817                         return -EINVAL;
8818                 }
8819                 vf = &pf->vfs[tunnel_filter->vf_id];
8820                 vsi = vf->vsi;
8821         }
8822
8823         /* Check if the filter exists in the SW list */
8824         memset(&check_filter, 0, sizeof(check_filter));
8825         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8826         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8827         check_filter.vf_id = tunnel_filter->vf_id;
8828         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8829         if (add && node) {
8830                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8831                 rte_free(cld_filter);
8832                 return -EINVAL;
8833         }
8834
8835         if (!add && !node) {
8836                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8837                 rte_free(cld_filter);
8838                 return -EINVAL;
8839         }
8840
8841         if (add) {
8842                 if (big_buffer)
8843                         ret = i40e_aq_add_cloud_filters_bb(hw,
8844                                                    vsi->seid, cld_filter, 1);
8845                 else
8846                         ret = i40e_aq_add_cloud_filters(hw,
8847                                         vsi->seid, &cld_filter->element, 1);
8848                 if (ret < 0) {
8849                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8850                         rte_free(cld_filter);
8851                         return -ENOTSUP;
8852                 }
8853                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8854                 if (tunnel == NULL) {
8855                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8856                         rte_free(cld_filter);
8857                         return -ENOMEM;
8858                 }
8859
8860                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8861                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8862                 if (ret < 0)
8863                         rte_free(tunnel);
8864         } else {
8865                 if (big_buffer)
8866                         ret = i40e_aq_rem_cloud_filters_bb(
8867                                 hw, vsi->seid, cld_filter, 1);
8868                 else
8869                         ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8870                                                 &cld_filter->element, 1);
8871                 if (ret < 0) {
8872                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8873                         rte_free(cld_filter);
8874                         return -ENOTSUP;
8875                 }
8876                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8877         }
8878
8879         rte_free(cld_filter);
8880         return ret;
8881 }
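
/*
 * i40e_dev_consistent_tunnel_filter_set() is a superset of
 * i40e_dev_tunnel_filter_set() above: besides the plain
 * VXLAN/NVGRE/IP-in-GRE types it handles MPLSoUDP/MPLSoGRE, GTP-C/GTP-U,
 * QinQ and raw L4-port filters, which carry extra match data in
 * general_fields[] and therefore need the "big buffer" AQ variants
 * (i40e_aq_add/rem_cloud_filters_bb), and it can steer matched traffic
 * to a VF VSI via is_to_vf/vf_id.
 */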
8882
8883 static int
8884 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8885 {
8886         uint8_t i;
8887
8888         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8889                 if (pf->vxlan_ports[i] == port)
8890                         return i;
8891         }
8892
8893         return -1;
8894 }
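
/*
 * Note: i40e_get_vxlan_port_idx() doubles as a free-slot finder; UDP
 * port 0 is never a valid tunnel port, so looking up port 0 (as done
 * below) returns the first unused entry in vxlan_ports[].
 */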
8895
8896 static int
8897 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8898 {
8899         int  idx, ret;
8900         uint8_t filter_idx = 0;
8901         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8902
8903         idx = i40e_get_vxlan_port_idx(pf, port);
8904
8905         /* Check if port already exists */
8906         if (idx >= 0) {
8907                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8908                 return -EINVAL;
8909         }
8910
8911         /* Now check if there is space to add the new port */
8912         idx = i40e_get_vxlan_port_idx(pf, 0);
8913         if (idx < 0) {
8914                 PMD_DRV_LOG(ERR,
8915                         "Maximum number of UDP ports reached, not adding port %d",
8916                         port);
8917                 return -ENOSPC;
8918         }
8919
8920         ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8921                                         &filter_idx, NULL);
8922         if (ret < 0) {
8923                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8924                 return -1;
8925         }
8926
8927         PMD_DRV_LOG(INFO, "Added port %d via AQ command, filter index %d",
8928                         port, filter_idx);
8929
8930         /* New port: add it and mark its index in the bitmap */
8931         pf->vxlan_ports[idx] = port;
8932         pf->vxlan_bitmap |= (1 << idx);
8933
8934         if (!(pf->flags & I40E_FLAG_VXLAN))
8935                 pf->flags |= I40E_FLAG_VXLAN;
8936
8937         return 0;
8938 }
8939
8940 static int
8941 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8942 {
8943         int idx;
8944         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8945
8946         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8947                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8948                 return -EINVAL;
8949         }
8950
8951         idx = i40e_get_vxlan_port_idx(pf, port);
8952
8953         if (idx < 0) {
8954                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8955                 return -EINVAL;
8956         }
8957
8958         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8959                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8960                 return -1;
8961         }
8962
8963         PMD_DRV_LOG(INFO, "Deleted port %d via AQ command, filter index %d",
8964                         port, idx);
8965
8966         pf->vxlan_ports[idx] = 0;
8967         pf->vxlan_bitmap &= ~(1 << idx);
8968
8969         if (!pf->vxlan_bitmap)
8970                 pf->flags &= ~I40E_FLAG_VXLAN;
8971
8972         return 0;
8973 }
8974
8975 /* Add UDP tunneling port */
8976 static int
8977 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8978                              struct rte_eth_udp_tunnel *udp_tunnel)
8979 {
8980         int ret = 0;
8981         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8982
8983         if (udp_tunnel == NULL)
8984                 return -EINVAL;
8985
8986         switch (udp_tunnel->prot_type) {
8987         case RTE_TUNNEL_TYPE_VXLAN:
8988                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8989                                           I40E_AQC_TUNNEL_TYPE_VXLAN);
8990                 break;
8991         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8992                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8993                                           I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8994                 break;
8995         case RTE_TUNNEL_TYPE_GENEVE:
8996         case RTE_TUNNEL_TYPE_TEREDO:
8997                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8998                 ret = -1;
8999                 break;
9000
9001         default:
9002                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
9003                 ret = -1;
9004                 break;
9005         }
9006
9007         return ret;
9008 }
9009
9010 /* Remove UDP tunneling port */
9011 static int
9012 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
9013                              struct rte_eth_udp_tunnel *udp_tunnel)
9014 {
9015         int ret = 0;
9016         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9017
9018         if (udp_tunnel == NULL)
9019                 return -EINVAL;
9020
9021         switch (udp_tunnel->prot_type) {
9022         case RTE_TUNNEL_TYPE_VXLAN:
9023         case RTE_TUNNEL_TYPE_VXLAN_GPE:
9024                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
9025                 break;
9026         case RTE_TUNNEL_TYPE_GENEVE:
9027         case RTE_TUNNEL_TYPE_TEREDO:
9028                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
9029                 ret = -1;
9030                 break;
9031         default:
9032                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
9033                 ret = -1;
9034                 break;
9035         }
9036
9037         return ret;
9038 }
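
/*
 * Illustrative usage sketch (not driver code): the two callbacks above
 * back the generic ethdev UDP-tunnel API. To register the IANA VXLAN
 * port on a given port_id:
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *
 * and rte_eth_dev_udp_tunnel_port_delete() with the same argument
 * removes it again.
 */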
9039
9040 /* Calculate the maximum number of contiguous PF queues that are configured */
9041 static int
9042 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
9043 {
9044         struct rte_eth_dev_data *data = pf->dev_data;
9045         int i, num;
9046         struct i40e_rx_queue *rxq;
9047
9048         num = 0;
9049         for (i = 0; i < pf->lan_nb_qps; i++) {
9050                 rxq = data->rx_queues[i];
9051                 if (rxq && rxq->q_set)
9052                         num++;
9053                 else
9054                         break;
9055         }
9056
9057         return num;
9058 }
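
/*
 * Only the contiguous run of configured queues starting at queue 0
 * matters here: the RSS LUT built in i40e_pf_config_rss() below spreads
 * traffic round-robin over queues [0, num), so a gap in the queue
 * configuration caps the usable RSS width.
 */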
9059
9060 /* Configure RSS */
9061 static int
9062 i40e_pf_config_rss(struct i40e_pf *pf)
9063 {
9064         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
9065         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9066         struct rte_eth_rss_conf rss_conf;
9067         uint32_t i, lut = 0;
9068         uint16_t j, num;
9069
9070         /*
9071          * If both VMDQ and RSS are enabled, not all PF queues are configured;
9072          * calculate the actual number of configured PF queues.
9073          */
9074         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
9075                 num = i40e_pf_calc_configured_queues_num(pf);
9076         else
9077                 num = pf->dev_data->nb_rx_queues;
9078
9079         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
9080         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
9081                         num);
9082
9083         if (num == 0) {
9084                 PMD_INIT_LOG(ERR,
9085                         "No PF queues are configured to enable RSS for port %u",
9086                         pf->dev_data->port_id);
9087                 return -ENOTSUP;
9088         }
9089
9090         if (pf->adapter->rss_reta_updated == 0) {
9091                 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
9092                         if (j == num)
9093                                 j = 0;
9094                         lut = (lut << 8) | (j & ((0x1 <<
9095                                 hw->func_caps.rss_table_entry_width) - 1));
9096                         if ((i & 3) == 3)
9097                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
9098                                                rte_bswap32(lut));
9099                 }
9100         }
9101
9102         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
9103         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
9104             !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
9105                 i40e_pf_disable_rss(pf);
9106                 return 0;
9107         }
9108         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
9109                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
9110                 /* Random default key */
9111                 static uint32_t rss_key_default[] = {0x6b793944,
9112                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
9113                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
9114                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
9115
9116                 rss_conf.rss_key = (uint8_t *)rss_key_default;
9117                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
9118                                                         sizeof(uint32_t);
9119         }
9120
9121         return i40e_hw_rss_hash_set(pf, &rss_conf);
9122 }
9123
9124 static int
9125 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
9126                                struct rte_eth_tunnel_filter_conf *filter)
9127 {
9128         if (pf == NULL || filter == NULL) {
9129                 PMD_DRV_LOG(ERR, "Invalid parameter");
9130                 return -EINVAL;
9131         }
9132
9133         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
9134                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9135                 return -EINVAL;
9136         }
9137
9138         if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
9139                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
9140                 return -EINVAL;
9141         }
9142
9143         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
9144                 (rte_is_zero_ether_addr(&filter->outer_mac))) {
9145                 PMD_DRV_LOG(ERR, "Cannot add an all-zero outer MAC address");
9146                 return -EINVAL;
9147         }
9148
9149         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
9150                 (rte_is_zero_ether_addr(&filter->inner_mac))) {
9151                 PMD_DRV_LOG(ERR, "Cannot add an all-zero inner MAC address");
9152                 return -EINVAL;
9153         }
9154
9155         return 0;
9156 }
9157
9158 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
9159 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
9160 int
9161 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
9162 {
9163         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9164         uint32_t val, reg;
9165         int ret = -EINVAL;
9166
9167         if (pf->support_multi_driver) {
9168                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
9169                 return -ENOTSUP;
9170         }
9171
9172         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
9173         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
9174
9175         if (len == 3) {
9176                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
9177         } else if (len == 4) {
9178                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
9179         } else {
9180                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
9181                 return ret;
9182         }
9183
9184         if (reg != val) {
9185                 ret = i40e_aq_debug_write_global_register(hw,
9186                                                    I40E_GL_PRS_FVBM(2),
9187                                                    reg, NULL);
9188                 if (ret != 0)
9189                         return ret;
9190                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
9191                             "to value 0x%08x",
9192                             I40E_GL_PRS_FVBM(2), reg);
9193         } else {
9194                 ret = 0;
9195         }
9196         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
9197                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
9198
9199         return ret;
9200 }
9201
9202 static int
9203 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
9204 {
9205         int ret = -EINVAL;
9206
9207         if (!hw || !cfg)
9208                 return -EINVAL;
9209
9210         switch (cfg->cfg_type) {
9211         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
9212                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
9213                 break;
9214         default:
9215                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
9216                 break;
9217         }
9218
9219         return ret;
9220 }
9221
9222 static int
9223 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
9224                                enum rte_filter_op filter_op,
9225                                void *arg)
9226 {
9227         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9228         int ret = I40E_ERR_PARAM;
9229
9230         switch (filter_op) {
9231         case RTE_ETH_FILTER_SET:
9232                 ret = i40e_dev_global_config_set(hw,
9233                         (struct rte_eth_global_cfg *)arg);
9234                 break;
9235         default:
9236                 PMD_DRV_LOG(ERR, "Unknown operation %u", filter_op);
9237                 break;
9238         }
9239
9240         return ret;
9241 }
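/*
 * Illustrative usage sketch (not in the original source; assumes the
 * RTE_ETH_FILTER_NONE case of the filter control dispatch is wired to
 * i40e_filter_ctrl_global_config()): an application selects the GRE key
 * length through the generic filter API, e.g. for 3-byte keys:
 *
 *     struct rte_eth_global_cfg cfg = {
 *             .cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
 *             .cfg.gre_key_len = 3,    (only 3 or 4 are accepted above)
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
 *                             RTE_ETH_FILTER_SET, &cfg);
 */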
9242
9243 static int
9244 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
9245                           enum rte_filter_op filter_op,
9246                           void *arg)
9247 {
9248         struct rte_eth_tunnel_filter_conf *filter;
9249         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9250         int ret = I40E_SUCCESS;
9251
9252         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
9253
9254         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
9255                 return I40E_ERR_PARAM;
9256
9257         switch (filter_op) {
9258         case RTE_ETH_FILTER_NOP:
9259                 if (!(pf->flags & I40E_FLAG_VXLAN))
9260                         ret = I40E_NOT_SUPPORTED;
9261                 break;
9262         case RTE_ETH_FILTER_ADD:
9263                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
9264                 break;
9265         case RTE_ETH_FILTER_DELETE:
9266                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
9267                 break;
9268         default:
9269                 PMD_DRV_LOG(ERR, "Unknown operation %u", filter_op);
9270                 ret = I40E_ERR_PARAM;
9271                 break;
9272         }
9273
9274         return ret;
9275 }
9276
9277 /* Get the symmetric hash enable configurations per port */
9278 static void
9279 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
9280 {
9281         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9282
9283         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
9284 }
9285
9286 /* Set the symmetric hash enable configurations per port */
9287 static void
9288 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9289 {
9290         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9291
9292         if (enable > 0) {
9293                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
9294                         PMD_DRV_LOG(INFO,
9295                                 "Symmetric hash has already been enabled");
9296                         return;
9297                 }
9298                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9299         } else {
9300                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
9301                         PMD_DRV_LOG(INFO,
9302                                 "Symmetric hash has already been disabled");
9303                         return;
9304                 }
9305                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9306         }
9307         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9308         I40E_WRITE_FLUSH(hw);
9309 }
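/*
 * Note: with symmetric hashing enabled on a port, the source and destination
 * fields of a packet are combined before hashing, so both directions of a
 * flow (e.g. A->B and B->A) produce the same hash value and are steered to
 * the same Rx queue.
 */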
9310
9311 /*
9312  * Get global configurations of hash function type and symmetric hash enable
9313  * per flow type (pctype). Note that global configuration means it affects all
9314  * the ports on the same NIC.
9315  */
9316 static int
9317 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
9318                                    struct rte_eth_hash_global_conf *g_cfg)
9319 {
9320         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9321         uint32_t reg;
9322         uint16_t i, j;
9323
9324         memset(g_cfg, 0, sizeof(*g_cfg));
9325         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9326         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
9327                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
9328         else
9329                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
9330         PMD_DRV_LOG(DEBUG, "Hash function is %s",
9331                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
9332
9333         /*
9334          * As i40e supports fewer than 64 flow types, only the first 64 bits
9335          * need to be checked.
9336          */
9337         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9338                 g_cfg->valid_bit_mask[i] = 0ULL;
9339                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
9340         }
9341
9342         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
9343
9344         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
9345                 if (!adapter->pctypes_tbl[i])
9346                         continue;
9347                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9348                      j < I40E_FILTER_PCTYPE_MAX; j++) {
9349                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
9350                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
9351                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
9352                                         g_cfg->sym_hash_enable_mask[0] |=
9353                                                                 (1ULL << i);
9354                                 }
9355                         }
9356                 }
9357         }
9358
9359         return 0;
9360 }
9361
9362 static int
9363 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
9364                               const struct rte_eth_hash_global_conf *g_cfg)
9365 {
9366         uint32_t i;
9367         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
9368
9369         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
9370                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
9371                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
9372                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
9373                                                 g_cfg->hash_func);
9374                 return -EINVAL;
9375         }
9376
9377         /*
9378          * As i40e supports fewer than 64 flow types, only the first 64 bits
9379          * need to be checked.
9380          */
9381         mask0 = g_cfg->valid_bit_mask[0];
9382         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9383                 if (i == 0) {
9384                         /* Check if any unsupported flow type is configured */
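                        /*
                         * (mask0 | i40e_mask) ^ i40e_mask is non-zero iff
                         * mask0 has a bit set outside i40e_mask, i.e. it
                         * equals mask0 & ~i40e_mask.
                         */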
9385                         if ((mask0 | i40e_mask) ^ i40e_mask)
9386                                 goto mask_err;
9387                 } else {
9388                         if (g_cfg->valid_bit_mask[i])
9389                                 goto mask_err;
9390                 }
9391         }
9392
9393         return 0;
9394
9395 mask_err:
9396         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
9397
9398         return -EINVAL;
9399 }
9400
9401 /*
9402  * Set global configurations of hash function type and symmetric hash enable
9403  * per flow type (pctype). Note that modifying this global configuration will
9404  * affect all the ports on the same NIC.
9405  */
9406 static int
9407 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
9408                                    struct rte_eth_hash_global_conf *g_cfg)
9409 {
9410         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9411         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9412         int ret;
9413         uint16_t i, j;
9414         uint32_t reg;
9415         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
9416
9417         if (pf->support_multi_driver) {
9418                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
9419                 return -ENOTSUP;
9420         }
9421
9422         /* Check the input parameters */
9423         ret = i40e_hash_global_config_check(adapter, g_cfg);
9424         if (ret < 0)
9425                 return ret;
9426
9427         /*
9428          * As i40e supports fewer than 64 flow types, only the first 64 bits
9429          * need to be configured.
9430          */
9431         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
9432                 if (mask0 & (1ULL << i)) {
9433                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
9434                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
9435
9436                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9437                              j < I40E_FILTER_PCTYPE_MAX; j++) {
9438                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
9439                                         i40e_write_global_rx_ctl(hw,
9440                                                           I40E_GLQF_HSYM(j),
9441                                                           reg);
9442                         }
9443                 }
9444         }
9445
9446         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9447         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
9448                 /* Toeplitz */
9449                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
9450                         PMD_DRV_LOG(DEBUG,
9451                                 "Hash function already set to Toeplitz");
9452                         goto out;
9453                 }
9454                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
9455         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
9456                 /* Simple XOR */
9457                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
9458                         PMD_DRV_LOG(DEBUG,
9459                                 "Hash function already set to Simple XOR");
9460                         goto out;
9461                 }
9462                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
9463         } else
9464                 /* Use the default, and keep it as it is */
9465                 goto out;
9466
9467         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
9468
9469 out:
9470         I40E_WRITE_FLUSH(hw);
9471
9472         return 0;
9473 }
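/*
 * Illustrative usage sketch (not in the original source; assumes the
 * RTE_ETH_FILTER_HASH path of the filter control dispatch reaches this
 * function for RTE_ETH_FILTER_SET): enable symmetric Toeplitz hashing for
 * non-fragmented IPv4/TCP on all ports of the NIC:
 *
 *     struct rte_eth_hash_filter_info info = { 0 };
 *
 *     info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
 *     info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
 *     info.info.global_conf.valid_bit_mask[0] =
 *             1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     info.info.global_conf.sym_hash_enable_mask[0] =
 *             1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                             RTE_ETH_FILTER_SET, &info);
 */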
9474
9475 /**
9476  * Valid input sets for hash and flow director filters per PCTYPE
9477  */
9478 static uint64_t
9479 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9480                 enum rte_filter_type filter)
9481 {
9482         uint64_t valid;
9483
9484         static const uint64_t valid_hash_inset_table[] = {
9485                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9486                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9487                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9488                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9489                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9490                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9491                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9492                         I40E_INSET_FLEX_PAYLOAD,
9493                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9494                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9495                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9496                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9497                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9498                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9499                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9500                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9501                         I40E_INSET_FLEX_PAYLOAD,
9502                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9503                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9504                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9505                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9506                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9507                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9508                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9509                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9510                         I40E_INSET_FLEX_PAYLOAD,
9511                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9512                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9513                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9514                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9515                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9516                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9517                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9518                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9519                         I40E_INSET_FLEX_PAYLOAD,
9520                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9521                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9522                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9523                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9524                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9525                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9526                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9527                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9528                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9529                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9530                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9531                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9532                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9533                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9534                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9535                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9536                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9537                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9538                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9539                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9540                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9541                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9542                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9543                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9544                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9545                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9546                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9547                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9548                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9549                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9550                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9551                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9552                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9553                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9554                         I40E_INSET_FLEX_PAYLOAD,
9555                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9556                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9557                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9558                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9559                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9560                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9561                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9562                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9563                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9564                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9565                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9566                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9567                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9568                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9569                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9570                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9571                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9572                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9573                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9574                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9575                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9576                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9577                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9578                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9579                         I40E_INSET_FLEX_PAYLOAD,
9580                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9581                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9582                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9583                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9584                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9585                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9586                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9587                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9588                         I40E_INSET_FLEX_PAYLOAD,
9589                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9590                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9591                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9592                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9593                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9594                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9595                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9596                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9597                         I40E_INSET_FLEX_PAYLOAD,
9598                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9599                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9600                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9601                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9602                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9603                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9604                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9605                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9606                         I40E_INSET_FLEX_PAYLOAD,
9607                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9608                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9609                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9610                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9611                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9612                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9613                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9614                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9615                         I40E_INSET_FLEX_PAYLOAD,
9616                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9617                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9618                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9619                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9620                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9621                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9622                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9623                         I40E_INSET_FLEX_PAYLOAD,
9624                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9625                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9626                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9627                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9628                         I40E_INSET_FLEX_PAYLOAD,
9629         };
9630
9631         /**
9632          * Flow director supports only fields defined in
9633          * union rte_eth_fdir_flow.
9634          */
9635         static const uint64_t valid_fdir_inset_table[] = {
9636                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9637                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9638                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9639                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9640                 I40E_INSET_IPV4_TTL,
9641                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9642                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9643                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9644                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9645                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9646                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9647                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9648                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9649                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9650                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9651                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9652                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9653                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9654                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9655                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9656                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9657                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9658                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9659                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9660                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9661                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9662                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9663                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9664                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9665                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9666                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9667                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9668                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9669                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9670                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9671                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9672                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9673                 I40E_INSET_SCTP_VT,
9674                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9675                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9676                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9677                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9678                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9679                 I40E_INSET_IPV4_TTL,
9680                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9681                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9682                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9683                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9684                 I40E_INSET_IPV6_HOP_LIMIT,
9685                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9686                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9687                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9688                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9689                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9690                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9691                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9692                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9693                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9694                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9695                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9696                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9697                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9698                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9699                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9700                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9701                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9702                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9703                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9704                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9705                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9706                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9707                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9708                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9709                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9710                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9711                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9712                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9713                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9714                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9715                 I40E_INSET_SCTP_VT,
9716                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9717                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9718                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9719                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9720                 I40E_INSET_IPV6_HOP_LIMIT,
9721                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9722                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9723                 I40E_INSET_LAST_ETHER_TYPE,
9724         };
9725
9726         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9727                 return 0;
9728         if (filter == RTE_ETH_FILTER_HASH)
9729                 valid = valid_hash_inset_table[pctype];
9730         else
9731                 valid = valid_fdir_inset_table[pctype];
9732
9733         return valid;
9734 }
9735
9736 /**
9737  * Validate if the input set is allowed for a specific PCTYPE
9738  */
9739 int
9740 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9741                 enum rte_filter_type filter, uint64_t inset)
9742 {
9743         uint64_t valid;
9744
9745         valid = i40e_get_valid_input_set(pctype, filter);
9746         if (inset & (~valid))
9747                 return -EINVAL;
9748
9749         return 0;
9750 }
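/*
 * Example: for I40E_FILTER_PCTYPE_NONF_IPV4_UDP with RTE_ETH_FILTER_HASH,
 * an inset of I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST is accepted, while
 * any inset containing an IPv6 field is outside the valid set and makes
 * this function return -EINVAL.
 */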
9751
9752 /* default input set fields combination per pctype */
9753 uint64_t
9754 i40e_get_default_input_set(uint16_t pctype)
9755 {
9756         static const uint64_t default_inset_table[] = {
9757                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9758                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9759                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9760                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9761                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9762                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9763                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9764                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9765                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9766                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9767                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9768                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9769                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9770                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9771                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9772                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9773                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9774                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9775                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9776                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9777                         I40E_INSET_SCTP_VT,
9778                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9779                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9780                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9781                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9782                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9783                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9784                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9785                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9786                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9787                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9788                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9789                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9790                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9791                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9792                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9793                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9794                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9795                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9796                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9797                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9798                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9799                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9800                         I40E_INSET_SCTP_VT,
9801                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9802                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9803                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9804                         I40E_INSET_LAST_ETHER_TYPE,
9805         };
9806
9807         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9808                 return 0;
9809
9810         return default_inset_table[pctype];
9811 }
9812
9813 /**
9814  * Parse the input set from index to logical bit masks
9815  */
9816 static int
9817 i40e_parse_input_set(uint64_t *inset,
9818                      enum i40e_filter_pctype pctype,
9819                      enum rte_eth_input_set_field *field,
9820                      uint16_t size)
9821 {
9822         uint16_t i, j;
9823         int ret = -EINVAL;
9824
9825         static const struct {
9826                 enum rte_eth_input_set_field field;
9827                 uint64_t inset;
9828         } inset_convert_table[] = {
9829                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9830                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9831                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9832                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9833                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9834                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9835                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9836                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9837                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9838                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9839                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9840                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9841                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9842                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9843                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9844                         I40E_INSET_IPV6_NEXT_HDR},
9845                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9846                         I40E_INSET_IPV6_HOP_LIMIT},
9847                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9848                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9849                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9850                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9851                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9852                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9853                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9854                         I40E_INSET_SCTP_VT},
9855                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9856                         I40E_INSET_TUNNEL_DMAC},
9857                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9858                         I40E_INSET_VLAN_TUNNEL},
9859                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9860                         I40E_INSET_TUNNEL_ID},
9861                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9862                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9863                         I40E_INSET_FLEX_PAYLOAD_W1},
9864                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9865                         I40E_INSET_FLEX_PAYLOAD_W2},
9866                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9867                         I40E_INSET_FLEX_PAYLOAD_W3},
9868                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9869                         I40E_INSET_FLEX_PAYLOAD_W4},
9870                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9871                         I40E_INSET_FLEX_PAYLOAD_W5},
9872                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9873                         I40E_INSET_FLEX_PAYLOAD_W6},
9874                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9875                         I40E_INSET_FLEX_PAYLOAD_W7},
9876                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9877                         I40E_INSET_FLEX_PAYLOAD_W8},
9878         };
9879
9880         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9881                 return ret;
9882
9883         /* Only one item is allowed for the default or none cases */
9884         if (size == 1) {
9885                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9886                         *inset = i40e_get_default_input_set(pctype);
9887                         return 0;
9888                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9889                         *inset = I40E_INSET_NONE;
9890                         return 0;
9891                 }
9892         }
9893
9894         for (i = 0, *inset = 0; i < size; i++) {
9895                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9896                         if (field[i] == inset_convert_table[j].field) {
9897                                 *inset |= inset_convert_table[j].inset;
9898                                 break;
9899                         }
9900                 }
9901
9902                 /* The list contains an unsupported field; return immediately */
9903                 if (j == RTE_DIM(inset_convert_table))
9904                         return ret;
9905         }
9906
9907         return 0;
9908 }
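/*
 * Example: field[] = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
 * RTE_ETH_INPUT_SET_L3_DST_IP4, RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT,
 * RTE_ETH_INPUT_SET_L4_UDP_DST_PORT } with size == 4 yields
 * *inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 *          I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT.
 */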
9909
9910 /**
9911  * Translate the input set from bit masks to register aware bit masks
9912  * and vice versa
9913  */
9914 uint64_t
9915 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9916 {
9917         uint64_t val = 0;
9918         uint16_t i;
9919
9920         struct inset_map {
9921                 uint64_t inset;
9922                 uint64_t inset_reg;
9923         };
9924
9925         static const struct inset_map inset_map_common[] = {
9926                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9927                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9928                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9929                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9930                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9931                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9932                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9933                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9934                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9935                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9936                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9937                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9938                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9939                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9940                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9941                 {I40E_INSET_TUNNEL_DMAC,
9942                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9943                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9944                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9945                 {I40E_INSET_TUNNEL_SRC_PORT,
9946                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9947                 {I40E_INSET_TUNNEL_DST_PORT,
9948                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9949                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9950                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9951                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9952                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9953                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9954                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9955                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9956                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9957                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9958         };
9959
9960         /* Some registers are mapped differently on the X722 */
9961         static const struct inset_map inset_map_diff_x722[] = {
9962                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9963                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9964                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9965                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9966         };
9967
9968         static const struct inset_map inset_map_diff_not_x722[] = {
9969                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9970                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9971                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9972                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9973         };
9974
9975         if (input == 0)
9976                 return val;
9977
9978         /* Translate input set to register aware inset */
9979         if (type == I40E_MAC_X722) {
9980                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9981                         if (input & inset_map_diff_x722[i].inset)
9982                                 val |= inset_map_diff_x722[i].inset_reg;
9983                 }
9984         } else {
9985                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9986                         if (input & inset_map_diff_not_x722[i].inset)
9987                                 val |= inset_map_diff_not_x722[i].inset_reg;
9988                 }
9989         }
9990
9991         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9992                 if (input & inset_map_common[i].inset)
9993                         val |= inset_map_common[i].inset_reg;
9994         }
9995
9996         return val;
9997 }
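/*
 * Example: I40E_INSET_IPV4_SRC translates to I40E_REG_INSET_L3_SRC_IP4 on
 * most MAC types, but to I40E_X722_REG_INSET_L3_SRC_IP4 on the X722; the
 * IPv4 source/destination, protocol and TTL fields are the only ones that
 * differ, which is why only they are split out of the common map.
 */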
9998
9999 int
10000 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
10001 {
10002         uint8_t i, idx = 0;
10003         uint64_t inset_need_mask = inset;
10004
10005         static const struct {
10006                 uint64_t inset;
10007                 uint32_t mask;
10008         } inset_mask_map[] = {
10009                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
10010                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
10011                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
10012                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK},
10013                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
10014                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
10015                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
10016                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
10017         };
10018
10019         if (!inset || !mask || !nb_elem)
10020                 return 0;
10021
10022         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
10023                 /* Clear the inset bit if no mask is required,
10024                  * for example proto + ttl
10025                  */
10026                 if ((inset & inset_mask_map[i].inset) ==
10027                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
10028                         inset_need_mask &= ~inset_mask_map[i].inset;
10029                 if (!inset_need_mask)
10030                         return 0;
10031         }
10032         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
10033                 if ((inset_need_mask & inset_mask_map[i].inset) ==
10034                     inset_mask_map[i].inset) {
10035                         if (idx >= nb_elem) {
10036                                 PMD_DRV_LOG(ERR, "Exceeded the maximum number of bitmasks");
10037                                 return -EINVAL;
10038                         }
10039                         mask[idx] = inset_mask_map[i].mask;
10040                         idx++;
10041                 }
10042         }
10043
10044         return idx;
10045 }
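/*
 * Worked example: inset = I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO fills
 * mask[0] = I40E_INSET_IPV4_TOS_MASK, mask[1] = I40E_INSET_IPV4_PROTO_MASK
 * and returns 2, while inset = I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL
 * needs no field mask at all (the pair is matched by the zero-mask entry)
 * and returns 0.
 */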
10046
10047 void
10048 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
10049 {
10050         uint32_t reg = i40e_read_rx_ctl(hw, addr);
10051
10052         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
10053         if (reg != val)
10054                 i40e_write_rx_ctl(hw, addr, val);
10055         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
10056                     (uint32_t)i40e_read_rx_ctl(hw, addr));
10057 }
10058
10059 void
10060 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
10061 {
10062         uint32_t reg = i40e_read_rx_ctl(hw, addr);
10063         struct rte_eth_dev *dev;
10064
10065         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
10066         if (reg != val) {
10067                 i40e_write_rx_ctl(hw, addr, val);
10068                 PMD_DRV_LOG(WARNING,
10069                             "i40e device %s changed global register [0x%08x]."
10070                             " original: 0x%08x, new: 0x%08x",
10071                             dev->device->name, addr, reg,
10072                             (uint32_t)i40e_read_rx_ctl(hw, addr));
10073         }
10074 }
10075
10076 static void
10077 i40e_filter_input_set_init(struct i40e_pf *pf)
10078 {
10079         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10080         enum i40e_filter_pctype pctype;
10081         uint64_t input_set, inset_reg;
10082         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10083         int num, i;
10084         uint16_t flow_type;
10085
10086         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
10087              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
10088                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
10089
10090                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
10091                         continue;
10092
10093                 input_set = i40e_get_default_input_set(pctype);
10094
10095                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10096                                                    I40E_INSET_MASK_NUM_REG);
10097                 if (num < 0)
10098                         return;
10099                 if (pf->support_multi_driver && num > 0) {
10100                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
10101                         return;
10102                 }
10103                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
10104                                         input_set);
10105
10106                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
10107                                       (uint32_t)(inset_reg & UINT32_MAX));
10108                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
10109                                      (uint32_t)((inset_reg >>
10110                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
10111                 if (!pf->support_multi_driver) {
10112                         i40e_check_write_global_reg(hw,
10113                                             I40E_GLQF_HASH_INSET(0, pctype),
10114                                             (uint32_t)(inset_reg & UINT32_MAX));
10115                         i40e_check_write_global_reg(hw,
10116                                              I40E_GLQF_HASH_INSET(1, pctype),
10117                                              (uint32_t)((inset_reg >>
10118                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
10119
10120                         for (i = 0; i < num; i++) {
10121                                 i40e_check_write_global_reg(hw,
10122                                                     I40E_GLQF_FD_MSK(i, pctype),
10123                                                     mask_reg[i]);
10124                                 i40e_check_write_global_reg(hw,
10125                                                   I40E_GLQF_HASH_MSK(i, pctype),
10126                                                   mask_reg[i]);
10127                         }
10128                         /* Clear unused mask registers of the pctype */
10129                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
10130                                 i40e_check_write_global_reg(hw,
10131                                                     I40E_GLQF_FD_MSK(i, pctype),
10132                                                     0);
10133                                 i40e_check_write_global_reg(hw,
10134                                                   I40E_GLQF_HASH_MSK(i, pctype),
10135                                                   0);
10136                         }
10137                 } else {
10138                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
10139                 }
10140                 I40E_WRITE_FLUSH(hw);
10141
10142                 /* store the default input set */
10143                 if (!pf->support_multi_driver)
10144                         pf->hash_input_set[pctype] = input_set;
10145                 pf->fdir.input_set[pctype] = input_set;
10146         }
10147 }
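/*
 * Note: each 64-bit input set value above is programmed as two 32-bit
 * halves, the low word into *_INSET(pctype, 0) / (0, pctype) and the high
 * word into *_INSET(pctype, 1) / (1, pctype). The global hash inset and
 * mask registers are deliberately left untouched when multiple drivers
 * share the NIC.
 */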
10148
10149 int
10150 i40e_hash_filter_inset_select(struct i40e_hw *hw,
10151                          struct rte_eth_input_set_conf *conf)
10152 {
10153         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
10154         enum i40e_filter_pctype pctype;
10155         uint64_t input_set, inset_reg = 0;
10156         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10157         int ret, i, num;
10158
10159         if (!conf) {
10160                 PMD_DRV_LOG(ERR, "Invalid pointer");
10161                 return -EFAULT;
10162         }
10163         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10164             conf->op != RTE_ETH_INPUT_SET_ADD) {
10165                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10166                 return -EINVAL;
10167         }
10168
10169         if (pf->support_multi_driver) {
10170                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
10171                 return -ENOTSUP;
10172         }
10173
10174         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10175         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10176                 PMD_DRV_LOG(ERR, "Invalid flow_type input");
10177                 return -EINVAL;
10178         }
10179
10180         if (hw->mac.type == I40E_MAC_X722) {
10181                 /* get translated pctype value in fd pctype register */
10182                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
10183                         I40E_GLQF_FD_PCTYPES((int)pctype));
10184         }
10185
10186         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10187                                    conf->inset_size);
10188         if (ret) {
10189                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10190                 return -EINVAL;
10191         }
10192
10193         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
10194                 /* get inset value in register */
10195                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
10196                 inset_reg <<= I40E_32_BIT_WIDTH;
10197                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
10198                 input_set |= pf->hash_input_set[pctype];
10199         }
10200         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10201                                            I40E_INSET_MASK_NUM_REG);
10202         if (num < 0)
10203                 return -EINVAL;
10204
10205         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10206
10207         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
10208                                     (uint32_t)(inset_reg & UINT32_MAX));
10209         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
10210                                     (uint32_t)((inset_reg >>
10211                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
10212
10213         for (i = 0; i < num; i++)
10214                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10215                                             mask_reg[i]);
10216         /* Clear unused mask registers of the pctype */
10217         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10218                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10219                                             0);
10220         I40E_WRITE_FLUSH(hw);
10221
10222         pf->hash_input_set[pctype] = input_set;
10223         return 0;
10224 }
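/*
 * Illustrative usage sketch (not in the original source; assumes the
 * legacy hash filter API dispatches RTE_ETH_HASH_FILTER_INPUT_SET_SELECT
 * to this function): hash IPv4/UDP flows on the source address only:
 *
 *     struct rte_eth_hash_filter_info info = { 0 };
 *
 *     info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
 *     info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *     info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
 *     info.info.input_set_conf.inset_size = 1;
 *     info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                             RTE_ETH_FILTER_SET, &info);
 */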
10225
10226 int
10227 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
10228                          struct rte_eth_input_set_conf *conf)
10229 {
10230         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10231         enum i40e_filter_pctype pctype;
10232         uint64_t input_set, inset_reg = 0;
10233         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10234         int ret, i, num;
10235
10236         if (!hw || !conf) {
10237                 PMD_DRV_LOG(ERR, "Invalid pointer");
10238                 return -EFAULT;
10239         }
10240         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10241             conf->op != RTE_ETH_INPUT_SET_ADD) {
10242                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10243                 return -EINVAL;
10244         }
10245
10246         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10247
10248         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10249                 PMD_DRV_LOG(ERR, "Invalid flow_type input");
10250                 return -EINVAL;
10251         }
10252
10253         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10254                                    conf->inset_size);
10255         if (ret) {
10256                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10257                 return -EINVAL;
10258         }
10259
10260         /* get inset value in register */
10261         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
10262         inset_reg <<= I40E_32_BIT_WIDTH;
10263         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
10264
10265         /* Cannot change the inset register for flex payload for fdir;
10266          * that is done by writing I40E_PRTQF_FD_FLXINSET
10267          * in i40e_set_flex_mask_on_pctype.
10268          */
10269         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
10270                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
10271         else
10272                 input_set |= pf->fdir.input_set[pctype];
10273         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10274                                            I40E_INSET_MASK_NUM_REG);
10275         if (num < 0)
10276                 return -EINVAL;
10277         if (pf->support_multi_driver && num > 0) {
10278                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10279                 return -ENOTSUP;
10280         }
10281
10282         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10283
10284         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
10285                               (uint32_t)(inset_reg & UINT32_MAX));
10286         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
10287                              (uint32_t)((inset_reg >>
10288                              I40E_32_BIT_WIDTH) & UINT32_MAX));
10289
10290         if (!pf->support_multi_driver) {
10291                 for (i = 0; i < num; i++)
10292                         i40e_check_write_global_reg(hw,
10293                                                     I40E_GLQF_FD_MSK(i, pctype),
10294                                                     mask_reg[i]);
10295                 /* Clear unused mask registers of the pctype */
10296                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10297                         i40e_check_write_global_reg(hw,
10298                                                     I40E_GLQF_FD_MSK(i, pctype),
10299                                                     0);
10300         } else {
10301                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10302         }
10303         I40E_WRITE_FLUSH(hw);
10304
10305         pf->fdir.input_set[pctype] = input_set;
10306         return 0;
10307 }
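/*
 * Illustrative usage sketch only (values are assumptions, not taken from
 * the driver): restrict the FDIR input set for non-fragmented IPv4/UDP
 * flows to the source and destination IPv4 addresses.
 *
 *     struct rte_eth_input_set_conf conf = {
 *             .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *             .inset_size = 2,
 *             .field = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
 *                        RTE_ETH_INPUT_SET_L3_DST_IP4 },
 *             .op = RTE_ETH_INPUT_SET_SELECT,
 *     };
 *     ret = i40e_fdir_filter_inset_select(pf, &conf);
 */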
10308
10309 static int
10310 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10311 {
10312         int ret = 0;
10313
10314         if (!hw || !info) {
10315                 PMD_DRV_LOG(ERR, "Invalid pointer");
10316                 return -EFAULT;
10317         }
10318
10319         switch (info->info_type) {
10320         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10321                 i40e_get_symmetric_hash_enable_per_port(hw,
10322                                         &(info->info.enable));
10323                 break;
10324         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10325                 ret = i40e_get_hash_filter_global_config(hw,
10326                                 &(info->info.global_conf));
10327                 break;
10328         default:
10329                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10330                                                         info->info_type);
10331                 ret = -EINVAL;
10332                 break;
10333         }
10334
10335         return ret;
10336 }
10337
10338 static int
10339 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10340 {
10341         int ret = 0;
10342
10343         if (!hw || !info) {
10344                 PMD_DRV_LOG(ERR, "Invalid pointer");
10345                 return -EFAULT;
10346         }
10347
10348         switch (info->info_type) {
10349         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10350                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
10351                 break;
10352         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10353                 ret = i40e_set_hash_filter_global_config(hw,
10354                                 &(info->info.global_conf));
10355                 break;
10356         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
10357                 ret = i40e_hash_filter_inset_select(hw,
10358                                                &(info->info.input_set_conf));
10359                 break;
10360
10361         default:
10362                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10363                                                         info->info_type);
10364                 ret = -EINVAL;
10365                 break;
10366         }
10367
10368         return ret;
10369 }
10370
10371 /* Operations for hash function */
10372 static int
10373 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
10374                       enum rte_filter_op filter_op,
10375                       void *arg)
10376 {
10377         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10378         int ret = 0;
10379
10380         switch (filter_op) {
10381         case RTE_ETH_FILTER_NOP:
10382                 break;
10383         case RTE_ETH_FILTER_GET:
10384                 ret = i40e_hash_filter_get(hw,
10385                         (struct rte_eth_hash_filter_info *)arg);
10386                 break;
10387         case RTE_ETH_FILTER_SET:
10388                 ret = i40e_hash_filter_set(hw,
10389                         (struct rte_eth_hash_filter_info *)arg);
10390                 break;
10391         default:
10392                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
10393                                                                 filter_op);
10394                 ret = -ENOTSUP;
10395                 break;
10396         }
10397
10398         return ret;
10399 }
10400
10401 /* Convert ethertype filter structure */
10402 static int
10403 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
10404                               struct i40e_ethertype_filter *filter)
10405 {
10406         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
10407                 RTE_ETHER_ADDR_LEN);
10408         filter->input.ether_type = input->ether_type;
10409         filter->flags = input->flags;
10410         filter->queue = input->queue;
10411
10412         return 0;
10413 }
10414
10415 /* Check whether the ethertype filter already exists */
10416 struct i40e_ethertype_filter *
10417 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
10418                                 const struct i40e_ethertype_filter_input *input)
10419 {
10420         int ret;
10421
10422         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
10423         if (ret < 0)
10424                 return NULL;
10425
10426         return ethertype_rule->hash_map[ret];
10427 }
10428
10429 /* Add ethertype filter in SW list */
10430 static int
10431 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
10432                                 struct i40e_ethertype_filter *filter)
10433 {
10434         struct i40e_ethertype_rule *rule = &pf->ethertype;
10435         int ret;
10436
10437         ret = rte_hash_add_key(rule->hash_table, &filter->input);
10438         if (ret < 0) {
10439                 PMD_DRV_LOG(ERR,
10440                             "Failed to insert ethertype filter"
10441                             " into hash table %d!",
10442                             ret);
10443                 return ret;
10444         }
10445         rule->hash_map[ret] = filter;
10446
10447         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
10448
10449         return 0;
10450 }
10451
10452 /* Delete ethertype filter in SW list */
10453 int
10454 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
10455                              struct i40e_ethertype_filter_input *input)
10456 {
10457         struct i40e_ethertype_rule *rule = &pf->ethertype;
10458         struct i40e_ethertype_filter *filter;
10459         int ret;
10460
10461         ret = rte_hash_del_key(rule->hash_table, input);
10462         if (ret < 0) {
10463                 PMD_DRV_LOG(ERR,
10464                             "Failed to delete ethertype filter"
10465                             " from hash table %d!",
10466                             ret);
10467                 return ret;
10468         }
10469         filter = rule->hash_map[ret];
10470         rule->hash_map[ret] = NULL;
10471
10472         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
10473         rte_free(filter);
10474
10475         return 0;
10476 }
10477
10478 /*
10479  * Configure an ethertype filter, which can direct packets by filtering
10480  * on MAC address and ether_type, or on ether_type only
10481  */
10482 int
10483 i40e_ethertype_filter_set(struct i40e_pf *pf,
10484                         struct rte_eth_ethertype_filter *filter,
10485                         bool add)
10486 {
10487         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10488         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
10489         struct i40e_ethertype_filter *ethertype_filter, *node;
10490         struct i40e_ethertype_filter check_filter;
10491         struct i40e_control_filter_stats stats;
10492         uint16_t flags = 0;
10493         int ret;
10494
10495         if (filter->queue >= pf->dev_data->nb_rx_queues) {
10496                 PMD_DRV_LOG(ERR, "Invalid queue ID");
10497                 return -EINVAL;
10498         }
10499         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
10500                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
10501                 PMD_DRV_LOG(ERR,
10502                         "unsupported ether_type(0x%04x) in control packet filter.",
10503                         filter->ether_type);
10504                 return -EINVAL;
10505         }
10506         if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
10507                 PMD_DRV_LOG(WARNING,
10508                         "filter vlan ether_type in first tag is not supported.");
10509
10510         /* Check whether the filter exists in the SW list */
10511         memset(&check_filter, 0, sizeof(check_filter));
10512         i40e_ethertype_filter_convert(filter, &check_filter);
10513         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
10514                                                &check_filter.input);
10515         if (add && node) {
10516                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
10517                 return -EINVAL;
10518         }
10519
10520         if (!add && !node) {
10521                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
10522                 return -EINVAL;
10523         }
10524
10525         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
10526                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10527         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
10528                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10529         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10530
10531         memset(&stats, 0, sizeof(stats));
10532         ret = i40e_aq_add_rem_control_packet_filter(hw,
10533                         filter->mac_addr.addr_bytes,
10534                         filter->ether_type, flags,
10535                         pf->main_vsi->seid,
10536                         filter->queue, add, &stats, NULL);
10537
10538         PMD_DRV_LOG(INFO,
10539                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
10540                 ret, stats.mac_etype_used, stats.etype_used,
10541                 stats.mac_etype_free, stats.etype_free);
10542         if (ret < 0)
10543                 return -ENOSYS;
10544
10545         /* Add or delete a filter in SW list */
10546         if (add) {
10547                 ethertype_filter = rte_zmalloc("ethertype_filter",
10548                                        sizeof(*ethertype_filter), 0);
10549                 if (ethertype_filter == NULL) {
10550                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
10551                         return -ENOMEM;
10552                 }
10553
10554                 rte_memcpy(ethertype_filter, &check_filter,
10555                            sizeof(check_filter));
10556                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
10557                 if (ret < 0)
10558                         rte_free(ethertype_filter);
10559         } else {
10560                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
10561         }
10562
10563         return ret;
10564 }
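/*
 * Illustrative usage sketch only (hypothetical values): steer ARP frames
 * to RX queue 0, matching on ether_type alone (no MAC match, no drop).
 *
 *     struct rte_eth_ethertype_filter f = {
 *             .ether_type = RTE_ETHER_TYPE_ARP,
 *             .flags = 0,
 *             .queue = 0,
 *     };
 *     ret = i40e_ethertype_filter_set(pf, &f, TRUE);
 */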
10565
10566 /*
10567  * Handle operations for ethertype filter.
10568  */
10569 static int
10570 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
10571                                 enum rte_filter_op filter_op,
10572                                 void *arg)
10573 {
10574         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10575         int ret = 0;
10576
10577         if (filter_op == RTE_ETH_FILTER_NOP)
10578                 return ret;
10579
10580         if (arg == NULL) {
10581                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
10582                             filter_op);
10583                 return -EINVAL;
10584         }
10585
10586         switch (filter_op) {
10587         case RTE_ETH_FILTER_ADD:
10588                 ret = i40e_ethertype_filter_set(pf,
10589                         (struct rte_eth_ethertype_filter *)arg,
10590                         TRUE);
10591                 break;
10592         case RTE_ETH_FILTER_DELETE:
10593                 ret = i40e_ethertype_filter_set(pf,
10594                         (struct rte_eth_ethertype_filter *)arg,
10595                         FALSE);
10596                 break;
10597         default:
10598                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
10599                 ret = -ENOSYS;
10600                 break;
10601         }
10602         return ret;
10603 }
10604
10605 static int
10606 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
10607                      enum rte_filter_type filter_type,
10608                      enum rte_filter_op filter_op,
10609                      void *arg)
10610 {
10611         int ret = 0;
10612
10613         if (dev == NULL)
10614                 return -EINVAL;
10615
10616         switch (filter_type) {
10617         case RTE_ETH_FILTER_NONE:
10618                 /* For global configuration */
10619                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
10620                 break;
10621         case RTE_ETH_FILTER_HASH:
10622                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
10623                 break;
10624         case RTE_ETH_FILTER_MACVLAN:
10625                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
10626                 break;
10627         case RTE_ETH_FILTER_ETHERTYPE:
10628                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
10629                 break;
10630         case RTE_ETH_FILTER_TUNNEL:
10631                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
10632                 break;
10633         case RTE_ETH_FILTER_FDIR:
10634                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
10635                 break;
10636         case RTE_ETH_FILTER_GENERIC:
10637                 if (filter_op != RTE_ETH_FILTER_GET)
10638                         return -EINVAL;
10639                 *(const void **)arg = &i40e_flow_ops;
10640                 break;
10641         default:
10642                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
10643                                                         filter_type);
10644                 ret = -EINVAL;
10645                 break;
10646         }
10647
10648         return ret;
10649 }
10650
10651 /*
10652  * Check and enable Extended Tag.
10653  * Enabling Extended Tag is important for 40G performance.
10654  */
10655 static void
10656 i40e_enable_extended_tag(struct rte_eth_dev *dev)
10657 {
10658         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10659         uint32_t buf = 0;
10660         int ret;
10661
10662         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10663                                       PCI_DEV_CAP_REG);
10664         if (ret < 0) {
10665                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10666                             PCI_DEV_CAP_REG);
10667                 return;
10668         }
10669         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
10670                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
10671                 return;
10672         }
10673
10674         buf = 0;
10675         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10676                                       PCI_DEV_CTRL_REG);
10677         if (ret < 0) {
10678                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10679                             PCI_DEV_CTRL_REG);
10680                 return;
10681         }
10682         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
10683                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
10684                 return;
10685         }
10686         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
10687         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
10688                                        PCI_DEV_CTRL_REG);
10689         if (ret < 0) {
10690                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
10691                             PCI_DEV_CTRL_REG);
10692                 return;
10693         }
10694 }
10695
10696 /*
10697  * As some registers won't be reset without a global hardware reset,
10698  * hardware initialization is needed to put those registers into an
10699  * expected initial state.
10700  */
10701 static void
10702 i40e_hw_init(struct rte_eth_dev *dev)
10703 {
10704         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10705
10706         i40e_enable_extended_tag(dev);
10707
10708         /* clear the PF Queue Filter control register */
10709         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
10710
10711         /* Disable symmetric hash per port */
10712         i40e_set_symmetric_hash_enable_per_port(hw, 0);
10713 }
10714
10715 /*
10716  * For X722 it is possible to have multiple pctypes mapped to the same flow
10717  * type; however, this function returns only the single highest pctype index,
10718  * which is not quite correct. This is a known problem of the i40e driver
10719  * and needs to be fixed later.
10720  */
10721 enum i40e_filter_pctype
10722 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
10723 {
10724         int i;
10725         uint64_t pctype_mask;
10726
10727         if (flow_type < I40E_FLOW_TYPE_MAX) {
10728                 pctype_mask = adapter->pctypes_tbl[flow_type];
10729                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10730                         if (pctype_mask & (1ULL << i))
10731                                 return (enum i40e_filter_pctype)i;
10732                 }
10733         }
10734         return I40E_FILTER_PCTYPE_INVALID;
10735 }
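/*
 * Example of the limitation noted above (X722, values assumed for
 * illustration): if RTE_ETH_FLOW_NONFRAG_IPV4_UDP maps to both the plain
 * IPv4/UDP pctype and an X722-only unicast/multicast IPv4/UDP pctype,
 * the loop above returns only the pctype with the highest index.
 */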
10736
10737 uint16_t
10738 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10739                         enum i40e_filter_pctype pctype)
10740 {
10741         uint16_t flowtype;
10742         uint64_t pctype_mask = 1ULL << pctype;
10743
10744         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10745              flowtype++) {
10746                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10747                         return flowtype;
10748         }
10749
10750         return RTE_ETH_FLOW_UNKNOWN;
10751 }
10752
10753 /*
10754  * On X710, performance numbers are far below expectation on recent firmware
10755  * versions; on XL710, performance numbers are also far below expectation on
10756  * recent firmware versions if promiscuous mode is disabled, or if promiscuous
10757  * mode is enabled and the port MAC address equals the packet destination MAC
10758  * address. The fix for this issue may not be integrated in the following
10759  * firmware version, so a workaround in the software driver is needed. It
10760  * modifies the initial values of 3 internal-only registers for both X710 and
10761  * XL710. Note that the values for X710 and XL710 could differ, and the
10762  * workaround can be removed once it is fixed in firmware in the future.
10763  */
10764
10765 /* For both X710 and XL710 */
10766 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10767 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10768 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10769
10770 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10771 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10772
10773 /* For X722 */
10774 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10775 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10776
10777 /* For X710 */
10778 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10779 /* For XL710 */
10780 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10781 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10782
10783 /*
10784  * GL_SWR_PM_UP_THR:
10785  * The value is not impacted by the link speed; it is set according
10786  * to the total number of ports for a better pipe-monitor configuration.
10787  */
10788 static bool
10789 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10790 {
10791 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10792                 .device_id = (dev),   \
10793                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10794
10795 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10796                 .device_id = (dev),   \
10797                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10798
10799         static const struct {
10800                 uint16_t device_id;
10801                 uint32_t val;
10802         } swr_pm_table[] = {
10803                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10804                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10805                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10806                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10807                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10808
10809                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10810                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10811                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10812                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10813                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10814                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10815                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10816         };
10817         uint32_t i;
10818
10819         if (value == NULL) {
10820                 PMD_DRV_LOG(ERR, "value is NULL");
10821                 return false;
10822         }
10823
10824         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10825                 if (hw->device_id == swr_pm_table[i].device_id) {
10826                         *value = swr_pm_table[i].val;
10827
10828                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10829                                     "value - 0x%08x",
10830                                     hw->device_id, *value);
10831                         return true;
10832                 }
10833         }
10834
10835         return false;
10836 }
10837
10838 static int
10839 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10840 {
10841         enum i40e_status_code status;
10842         struct i40e_aq_get_phy_abilities_resp phy_ab;
10843         int ret = -ENOTSUP;
10844         int retries = 0;
10845
10846         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10847                                               NULL);
10848
10849         while (status) {
10850                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10851                         status);
10852                 retries++;
10853                 rte_delay_us(100000);
10854                 if (retries < 5)
10855                         status = i40e_aq_get_phy_capabilities(hw, false,
10856                                         true, &phy_ab, NULL);
10857                 else
10858                         return ret;
10859         }
10860         return 0;
10861 }
10862
10863 static void
10864 i40e_configure_registers(struct i40e_hw *hw)
10865 {
10866         static struct {
10867                 uint32_t addr;
10868                 uint64_t val;
10869         } reg_table[] = {
10870                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10871                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10872                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10873         };
10874         uint64_t reg;
10875         uint32_t i;
10876         int ret;
10877
10878         for (i = 0; i < RTE_DIM(reg_table); i++) {
10879                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10880                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10881                                 reg_table[i].val =
10882                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10883                         else /* For X710/XL710/XXV710 */
10884                                 if (hw->aq.fw_maj_ver < 6)
10885                                         reg_table[i].val =
10886                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10887                                 else
10888                                         reg_table[i].val =
10889                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10890                 }
10891
10892                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10893                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10894                                 reg_table[i].val =
10895                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10896                         else /* For X710/XL710/XXV710 */
10897                                 reg_table[i].val =
10898                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10899                 }
10900
10901                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10902                         uint32_t cfg_val;
10903
10904                         if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10905                                 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10906                                             "GL_SWR_PM_UP_THR value fixup",
10907                                             hw->device_id);
10908                                 continue;
10909                         }
10910
10911                         reg_table[i].val = cfg_val;
10912                 }
10913
10914                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10915                                                         &reg, NULL);
10916                 if (ret < 0) {
10917                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10918                                                         reg_table[i].addr);
10919                         break;
10920                 }
10921                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10922                                                 reg_table[i].addr, reg);
10923                 if (reg == reg_table[i].val)
10924                         continue;
10925
10926                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10927                                                 reg_table[i].val, NULL);
10928                 if (ret < 0) {
10929                         PMD_DRV_LOG(ERR,
10930                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10931                                 reg_table[i].val, reg_table[i].addr);
10932                         break;
10933                 }
10934                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10935                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10936         }
10937 }
10938
10939 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10940 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10941 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10942 static int
10943 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10944 {
10945         uint32_t reg;
10946         int ret;
10947
10948         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10949                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10950                 return -EINVAL;
10951         }
10952
10953         /* Configure for double VLAN RX stripping */
10954         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10955         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10956                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10957                 ret = i40e_aq_debug_write_register(hw,
10958                                                    I40E_VSI_TSR(vsi->vsi_id),
10959                                                    reg, NULL);
10960                 if (ret < 0) {
10961                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10962                                     vsi->vsi_id);
10963                         return I40E_ERR_CONFIG;
10964                 }
10965         }
10966
10967         /* Configure for double VLAN TX insertion */
10968         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10969         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10970                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10971                 ret = i40e_aq_debug_write_register(hw,
10972                                                    I40E_VSI_L2TAGSTXVALID(
10973                                                    vsi->vsi_id), reg, NULL);
10974                 if (ret < 0) {
10975                         PMD_DRV_LOG(ERR,
10976                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10977                                 vsi->vsi_id);
10978                         return I40E_ERR_CONFIG;
10979                 }
10980         }
10981
10982         return 0;
10983 }
10984
10985 /**
10986  * i40e_aq_add_mirror_rule
10987  * @hw: pointer to the hardware structure
10988  * @seid: VEB seid to add mirror rule to
10989  * @dst_id: destination vsi seid
10990  * @entries: Buffer which contains the entities to be mirrored
10991  * @count: number of entities contained in the buffer
10992  * @rule_id: the rule_id of the rule to be added
10993  *
10994  * Add a mirror rule for a given veb.
10995  *
10996  **/
10997 static enum i40e_status_code
10998 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10999                         uint16_t seid, uint16_t dst_id,
11000                         uint16_t rule_type, uint16_t *entries,
11001                         uint16_t count, uint16_t *rule_id)
11002 {
11003         struct i40e_aq_desc desc;
11004         struct i40e_aqc_add_delete_mirror_rule cmd;
11005         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
11006                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
11007                 &desc.params.raw;
11008         uint16_t buff_len;
11009         enum i40e_status_code status;
11010
11011         i40e_fill_default_direct_cmd_desc(&desc,
11012                                           i40e_aqc_opc_add_mirror_rule);
11013         memset(&cmd, 0, sizeof(cmd));
11014
11015         buff_len = sizeof(uint16_t) * count;
11016         desc.datalen = rte_cpu_to_le_16(buff_len);
11017         if (buff_len > 0)
11018                 desc.flags |= rte_cpu_to_le_16(
11019                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
11020         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
11021                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
11022         cmd.num_entries = rte_cpu_to_le_16(count);
11023         cmd.seid = rte_cpu_to_le_16(seid);
11024         cmd.destination = rte_cpu_to_le_16(dst_id);
11025
11026         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
11027         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
11028         PMD_DRV_LOG(INFO,
11029                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
11030                 hw->aq.asq_last_status, resp->rule_id,
11031                 resp->mirror_rules_used, resp->mirror_rules_free);
11032         *rule_id = rte_le_to_cpu_16(resp->rule_id);
11033
11034         return status;
11035 }
11036
11037 /**
11038  * i40e_aq_del_mirror_rule
11039  * @hw: pointer to the hardware structure
11040  * @seid: VEB seid to delete the mirror rule from
11041  * @entries: Buffer which contains the entities to be mirrored
11042  * @count: number of entities contained in the buffer
11043  * @rule_id: the rule_id of the rule to be deleted
11044  *
11045  * Delete a mirror rule for a given veb.
11046  *
11047  **/
11048 static enum i40e_status_code
11049 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
11050                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
11051                 uint16_t count, uint16_t rule_id)
11052 {
11053         struct i40e_aq_desc desc;
11054         struct i40e_aqc_add_delete_mirror_rule cmd;
11055         uint16_t buff_len = 0;
11056         enum i40e_status_code status;
11057         void *buff = NULL;
11058
11059         i40e_fill_default_direct_cmd_desc(&desc,
11060                                           i40e_aqc_opc_delete_mirror_rule);
11061         memset(&cmd, 0, sizeof(cmd));
11062         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
11063                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
11064                                                           I40E_AQ_FLAG_RD));
11065                 cmd.num_entries = count;
11066                 buff_len = sizeof(uint16_t) * count;
11067                 desc.datalen = rte_cpu_to_le_16(buff_len);
11068                 buff = (void *)entries;
11069         } else
11070                 /* rule id is filled in destination field for deleting mirror rule */
11071                 cmd.destination = rte_cpu_to_le_16(rule_id);
11072
11073         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
11074                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
11075         cmd.seid = rte_cpu_to_le_16(seid);
11076
11077         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
11078         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
11079
11080         return status;
11081 }
11082
11083 /**
11084  * i40e_mirror_rule_set
11085  * @dev: pointer to the device
11086  * @mirror_conf: mirror rule info
11087  * @sw_id: mirror rule's sw_id
11088  * @on: enable/disable
11089  *
11090  * set a mirror rule.
11091  *
11092  **/
11093 static int
11094 i40e_mirror_rule_set(struct rte_eth_dev *dev,
11095                         struct rte_eth_mirror_conf *mirror_conf,
11096                         uint8_t sw_id, uint8_t on)
11097 {
11098         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11099         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11100         struct i40e_mirror_rule *it, *mirr_rule = NULL;
11101         struct i40e_mirror_rule *parent = NULL;
11102         uint16_t seid, dst_seid, rule_id;
11103         uint16_t i, j = 0;
11104         int ret;
11105
11106         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
11107
11108         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
11109                 PMD_DRV_LOG(ERR,
11110                         "mirror rule cannot be configured without veb or vfs.");
11111                 return -ENOSYS;
11112         }
11113         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
11114                 PMD_DRV_LOG(ERR, "mirror table is full.");
11115                 return -ENOSPC;
11116         }
11117         if (mirror_conf->dst_pool > pf->vf_num) {
11118                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
11119                                  mirror_conf->dst_pool);
11120                 return -EINVAL;
11121         }
11122
11123         seid = pf->main_vsi->veb->seid;
11124
11125         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11126                 if (sw_id <= it->index) {
11127                         mirr_rule = it;
11128                         break;
11129                 }
11130                 parent = it;
11131         }
11132         if (mirr_rule && sw_id == mirr_rule->index) {
11133                 if (on) {
11134                         PMD_DRV_LOG(ERR, "mirror rule exists.");
11135                         return -EEXIST;
11136                 } else {
11137                         ret = i40e_aq_del_mirror_rule(hw, seid,
11138                                         mirr_rule->rule_type,
11139                                         mirr_rule->entries,
11140                                         mirr_rule->num_entries, mirr_rule->id);
11141                         if (ret < 0) {
11142                                 PMD_DRV_LOG(ERR,
11143                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
11144                                         ret, hw->aq.asq_last_status);
11145                                 return -ENOSYS;
11146                         }
11147                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11148                         rte_free(mirr_rule);
11149                         pf->nb_mirror_rule--;
11150                         return 0;
11151                 }
11152         } else if (!on) {
11153                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11154                 return -ENOENT;
11155         }
11156
11157         mirr_rule = rte_zmalloc("i40e_mirror_rule",
11158                                 sizeof(struct i40e_mirror_rule), 0);
11159         if (!mirr_rule) {
11160                 PMD_DRV_LOG(ERR, "failed to allocate memory");
11161                 return I40E_ERR_NO_MEMORY;
11162         }
11163         switch (mirror_conf->rule_type) {
11164         case ETH_MIRROR_VLAN:
11165                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
11166                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
11167                                 mirr_rule->entries[j] =
11168                                         mirror_conf->vlan.vlan_id[i];
11169                                 j++;
11170                         }
11171                 }
11172                 if (j == 0) {
11173                         PMD_DRV_LOG(ERR, "vlan is not specified.");
11174                         rte_free(mirr_rule);
11175                         return -EINVAL;
11176                 }
11177                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
11178                 break;
11179         case ETH_MIRROR_VIRTUAL_POOL_UP:
11180         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
11181                 /* check if the specified pool bit is out of range */
11182                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
11183                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
11184                         rte_free(mirr_rule);
11185                         return -EINVAL;
11186                 }
11187                 for (i = 0, j = 0; i < pf->vf_num; i++) {
11188                         if (mirror_conf->pool_mask & (1ULL << i)) {
11189                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
11190                                 j++;
11191                         }
11192                 }
11193                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
11194                         /* add pf vsi to entries */
11195                         mirr_rule->entries[j] = pf->main_vsi_seid;
11196                         j++;
11197                 }
11198                 if (j == 0) {
11199                         PMD_DRV_LOG(ERR, "pool is not specified.");
11200                         rte_free(mirr_rule);
11201                         return -EINVAL;
11202                 }
11203                 /* In AQ commands, egress and ingress are from the switch, not the port */
11204                 mirr_rule->rule_type =
11205                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
11206                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
11207                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
11208                 break;
11209         case ETH_MIRROR_UPLINK_PORT:
11210                 /* In AQ commands, egress and ingress are from the switch, not the port */
11211                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
11212                 break;
11213         case ETH_MIRROR_DOWNLINK_PORT:
11214                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
11215                 break;
11216         default:
11217                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
11218                         mirror_conf->rule_type);
11219                 rte_free(mirr_rule);
11220                 return -EINVAL;
11221         }
11222
11223         /* If the dst_pool is equal to vf_num, consider it as PF */
11224         if (mirror_conf->dst_pool == pf->vf_num)
11225                 dst_seid = pf->main_vsi_seid;
11226         else
11227                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
11228
11229         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
11230                                       mirr_rule->rule_type, mirr_rule->entries,
11231                                       j, &rule_id);
11232         if (ret < 0) {
11233                 PMD_DRV_LOG(ERR,
11234                         "failed to add mirror rule: ret = %d, aq_err = %d.",
11235                         ret, hw->aq.asq_last_status);
11236                 rte_free(mirr_rule);
11237                 return -ENOSYS;
11238         }
11239
11240         mirr_rule->index = sw_id;
11241         mirr_rule->num_entries = j;
11242         mirr_rule->id = rule_id;
11243         mirr_rule->dst_vsi_seid = dst_seid;
11244
11245         if (parent)
11246                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
11247         else
11248                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
11249
11250         pf->nb_mirror_rule++;
11251         return 0;
11252 }
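/*
 * Illustrative usage sketch only (hypothetical values): mirror VLAN 100
 * traffic to the PF pool (dst_pool == pf->vf_num selects the PF VSI),
 * using sw_id 0 and on = 1.
 *
 *     struct rte_eth_mirror_conf conf = {
 *             .rule_type = ETH_MIRROR_VLAN,
 *             .dst_pool = pf->vf_num,
 *             .vlan = { .vlan_mask = 1ULL << 0, .vlan_id = { 100 } },
 *     };
 *     ret = i40e_mirror_rule_set(dev, &conf, 0, 1);
 */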
11253
11254 /**
11255  * i40e_mirror_rule_reset
11256  * @dev: pointer to the device
11257  * @sw_id: mirror rule's sw_id
11258  *
11259  * reset a mirror rule.
11260  *
11261  **/
11262 static int
11263 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
11264 {
11265         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11266         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11267         struct i40e_mirror_rule *it, *mirr_rule = NULL;
11268         uint16_t seid;
11269         int ret;
11270
11271         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
11272
11273         seid = pf->main_vsi->veb->seid;
11274
11275         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11276                 if (sw_id == it->index) {
11277                         mirr_rule = it;
11278                         break;
11279                 }
11280         }
11281         if (mirr_rule) {
11282                 ret = i40e_aq_del_mirror_rule(hw, seid,
11283                                 mirr_rule->rule_type,
11284                                 mirr_rule->entries,
11285                                 mirr_rule->num_entries, mirr_rule->id);
11286                 if (ret < 0) {
11287                         PMD_DRV_LOG(ERR,
11288                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
11289                                 ret, hw->aq.asq_last_status);
11290                         return -ENOSYS;
11291                 }
11292                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11293                 rte_free(mirr_rule);
11294                 pf->nb_mirror_rule--;
11295         } else {
11296                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11297                 return -ENOENT;
11298         }
11299         return 0;
11300 }
11301
11302 static uint64_t
11303 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
11304 {
11305         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11306         uint64_t systim_cycles;
11307
11308         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
11309         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
11310                         << 32;
11311
11312         return systim_cycles;
11313 }
11314
11315 static uint64_t
11316 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
11317 {
11318         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11319         uint64_t rx_tstamp;
11320
11321         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
11322         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
11323                         << 32;
11324
11325         return rx_tstamp;
11326 }
11327
11328 static uint64_t
11329 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
11330 {
11331         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11332         uint64_t tx_tstamp;
11333
11334         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
11335         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
11336                         << 32;
11337
11338         return tx_tstamp;
11339 }
11340
11341 static void
11342 i40e_start_timecounters(struct rte_eth_dev *dev)
11343 {
11344         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11345         struct i40e_adapter *adapter = dev->data->dev_private;
11346         struct rte_eth_link link;
11347         uint32_t tsync_inc_l;
11348         uint32_t tsync_inc_h;
11349
11350         /* Get current link speed. */
11351         i40e_dev_link_update(dev, 1);
11352         rte_eth_linkstatus_get(dev, &link);
11353
11354         switch (link.link_speed) {
11355         case ETH_SPEED_NUM_40G:
11356         case ETH_SPEED_NUM_25G:
11357                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
11358                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
11359                 break;
11360         case ETH_SPEED_NUM_10G:
11361                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
11362                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
11363                 break;
11364         case ETH_SPEED_NUM_1G:
11365                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
11366                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
11367                 break;
11368         default:
11369                 tsync_inc_l = 0x0;
11370                 tsync_inc_h = 0x0;
11371         }
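        /*
         * For an unrecognized link speed the increment stays 0, so the
         * PHC does not advance until the link is up at a supported speed.
         */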
11372
11373         /* Set the timesync increment value. */
11374         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
11375         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
11376
11377         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
11378         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11379         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11380
11381         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11382         adapter->systime_tc.cc_shift = 0;
11383         adapter->systime_tc.nsec_mask = 0;
11384
11385         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11386         adapter->rx_tstamp_tc.cc_shift = 0;
11387         adapter->rx_tstamp_tc.nsec_mask = 0;
11388
11389         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11390         adapter->tx_tstamp_tc.cc_shift = 0;
11391         adapter->tx_tstamp_tc.nsec_mask = 0;
11392 }
11393
11394 static int
11395 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
11396 {
11397         struct i40e_adapter *adapter = dev->data->dev_private;
11398
11399         adapter->systime_tc.nsec += delta;
11400         adapter->rx_tstamp_tc.nsec += delta;
11401         adapter->tx_tstamp_tc.nsec += delta;
11402
11403         return 0;
11404 }
11405
11406 static int
11407 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
11408 {
11409         uint64_t ns;
11410         struct i40e_adapter *adapter = dev->data->dev_private;
11411
11412         ns = rte_timespec_to_ns(ts);
11413
11414         /* Set the timecounters to a new value. */
11415         adapter->systime_tc.nsec = ns;
11416         adapter->rx_tstamp_tc.nsec = ns;
11417         adapter->tx_tstamp_tc.nsec = ns;
11418
11419         return 0;
11420 }
11421
11422 static int
11423 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
11424 {
11425         uint64_t ns, systime_cycles;
11426         struct i40e_adapter *adapter = dev->data->dev_private;
11427
11428         systime_cycles = i40e_read_systime_cyclecounter(dev);
11429         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
11430         *ts = rte_ns_to_timespec(ns);
11431
11432         return 0;
11433 }
11434
11435 static int
11436 i40e_timesync_enable(struct rte_eth_dev *dev)
11437 {
11438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11439         uint32_t tsync_ctl_l;
11440         uint32_t tsync_ctl_h;
11441
11442         /* Stop the timesync system time. */
11443         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11444         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11445         /* Reset the timesync system time value. */
11446         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
11447         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
11448
11449         i40e_start_timecounters(dev);
11450
11451         /* Clear timesync registers. */
11452         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11453         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
11454         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
11455         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
11456         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
11457         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
11458
11459         /* Enable timestamping of PTP packets. */
11460         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11461         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
11462
11463         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11464         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
11465         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
11466
11467         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11468         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11469
11470         return 0;
11471 }
11472
11473 static int
11474 i40e_timesync_disable(struct rte_eth_dev *dev)
11475 {
11476         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11477         uint32_t tsync_ctl_l;
11478         uint32_t tsync_ctl_h;
11479
11480         /* Disable timestamping of transmitted PTP packets. */
11481         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11482         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
11483
11484         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11485         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
11486
11487         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11488         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11489
11490         /* Reset the timesync increment value. */
11491         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11492         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11493
11494         return 0;
11495 }
11496
11497 static int
11498 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
11499                                 struct timespec *timestamp, uint32_t flags)
11500 {
11501         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11502         struct i40e_adapter *adapter = dev->data->dev_private;
11503         uint32_t sync_status;
11504         uint32_t index = flags & 0x03;
11505         uint64_t rx_tstamp_cycles;
11506         uint64_t ns;
11507
11508         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
11509         if ((sync_status & (1 << index)) == 0)
11510                 return -EINVAL;
11511
11512         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
11513         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
11514         *timestamp = rte_ns_to_timespec(ns);
11515
11516         return 0;
11517 }
11518
11519 static int
11520 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
11521                                 struct timespec *timestamp)
11522 {
11523         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11524         struct i40e_adapter *adapter = dev->data->dev_private;
11525         uint32_t sync_status;
11526         uint64_t tx_tstamp_cycles;
11527         uint64_t ns;
11528
11529         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11530         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
11531                 return -EINVAL;
11532
11533         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
11534         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
11535         *timestamp = rte_ns_to_timespec(ns);
11536
11537         return 0;
11538 }
11539
11540 /*
11541  * i40e_parse_dcb_configure - parse DCB configuration from the user
11542  * @dev: the device being configured
11543  * @dcb_cfg: pointer to the parsed result
11544  * @*tc_map: bit map of enabled traffic classes
11545  *
11546  * Returns 0 on success, negative value on failure
11547  */
11548 static int
11549 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
11550                          struct i40e_dcbx_config *dcb_cfg,
11551                          uint8_t *tc_map)
11552 {
11553         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
11554         uint8_t i, tc_bw, bw_lf;
11555
11556         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
11557
11558         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
11559         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
11560                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
11561                 return -EINVAL;
11562         }
11563
11564         /* assume each tc has the same bw */
11565         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
11566         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11567                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
11568         /* to ensure the sum of tcbw is equal to 100 */
11569         /* distribute the remainder so that the sum of tcbw equals 100 */
11570         for (i = 0; i < bw_lf; i++)
11571                 dcb_cfg->etscfg.tcbwtable[i]++;
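        /*
         * Worked example (assumed input): with nb_tcs = 8, tc_bw = 100 / 8
         * = 12 and bw_lf = 100 % 8 = 4, so the first four TCs get 13% and
         * the rest 12%, for a total of exactly 100.
         */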
11572
11573         /* assume each tc has the same Transmission Selection Algorithm */
11574         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11575                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
11576
11577         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11578                 dcb_cfg->etscfg.prioritytable[i] =
11579                                 dcb_rx_conf->dcb_tc[i];
11580
11581         /* FW needs one App to configure HW */
11582         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
11583         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
11584         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
11585         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
11586
11587         if (dcb_rx_conf->nb_tcs == 0)
11588                 *tc_map = 1; /* tc0 only */
11589         else
11590                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
11591
11592         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
11593                 dcb_cfg->pfc.willing = 0;
11594                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
11595                 dcb_cfg->pfc.pfcenable = *tc_map;
11596         }
11597         return 0;
11598 }
11599
11600
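/*
 * i40e_vsi_update_queue_mapping - update TC/queue mapping of a VSI
 * @vsi: VSI to be updated
 * @info: VSI properties in which the queue mapping sections are filled
 * @enabled_tcmap: bitmap of the TCs to enable
 *
 * Splits the usable queues of the VSI evenly across the enabled TCs; the
 * queue count per TC is rounded down to a power of two, since the
 * tc_mapping word encodes it as an exponent. Disabled TCs are left to the
 * default queue.
 *
 * Returns I40E_SUCCESS on success, error code on failure
 */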
11601 static enum i40e_status_code
11602 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
11603                               struct i40e_aqc_vsi_properties_data *info,
11604                               uint8_t enabled_tcmap)
11605 {
11606         enum i40e_status_code ret;
11607         int i, total_tc = 0;
11608         uint16_t qpnum_per_tc, bsf, qp_idx;
11609         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
11610         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
11611         uint16_t used_queues;
11612
11613         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
11614         if (ret != I40E_SUCCESS)
11615                 return ret;
11616
11617         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11618                 if (enabled_tcmap & (1 << i))
11619                         total_tc++;
11620         }
11621         if (total_tc == 0)
11622                 total_tc = 1;
11623         vsi->enabled_tc = enabled_tcmap;
11624
11625         /* different VSI types have different numbers of queues assigned */
11626         if (vsi->type == I40E_VSI_MAIN)
11627                 used_queues = dev_data->nb_rx_queues -
11628                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11629         else if (vsi->type == I40E_VSI_VMDQ2)
11630                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11631         else {
11632                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
11633                 return I40E_ERR_NO_AVAILABLE_VSI;
11634         }
11635
11636         qpnum_per_tc = used_queues / total_tc;
11637         /* Number of queues per enabled TC */
11638         if (qpnum_per_tc == 0) {
11639                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
11640                 return I40E_ERR_INVALID_QP_ID;
11641         }
11642         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
11643                                 I40E_MAX_Q_PER_TC);
11644         bsf = rte_bsf32(qpnum_per_tc);
11645
11646         /**
11647          * Configure TC and queue mapping parameters. For each enabled TC,
11648          * allocate qpnum_per_tc queues to it; for disabled TCs, the
11649          * default queue serves the traffic.
11650          */
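        /* Illustrative example: with 8 usable queues and two enabled TCs,
         * qpnum_per_tc = 4 and bsf = 2, so TC0 is encoded as queue offset 0
         * with 2^2 queues (0-3) and TC1 as queue offset 4 with queues 4-7.
         */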
11651         qp_idx = 0;
11652         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11653                 if (vsi->enabled_tc & (1 << i)) {
11654                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
11655                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
11656                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
11657                         qp_idx += qpnum_per_tc;
11658                 } else
11659                         info->tc_mapping[i] = 0;
11660         }
11661
11662         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
11663         if (vsi->type == I40E_VSI_SRIOV) {
11664                 info->mapping_flags |=
11665                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
11666                 for (i = 0; i < vsi->nb_qps; i++)
11667                         info->queue_mapping[i] =
11668                                 rte_cpu_to_le_16(vsi->base_queue + i);
11669         } else {
11670                 info->mapping_flags |=
11671                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
11672                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
11673         }
11674         info->valid_sections |=
11675                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
11676
11677         return I40E_SUCCESS;
11678 }
11679
11680 /*
11681  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
11682  * @veb: VEB to be configured
11683  * @tc_map: enabled TC bitmap
11684  *
11685  * Returns 0 on success, negative value on failure
11686  */
11687 static enum i40e_status_code
11688 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
11689 {
11690         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
11691         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
11692         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
11693         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
11694         enum i40e_status_code ret = I40E_SUCCESS;
11695         int i;
11696         uint32_t bw_max;
11697
11698         /* Nothing to do if the requested TC map is already enabled */
11699         if (veb->enabled_tc == tc_map)
11700                 return ret;
11701
11702         /* configure tc bandwidth */
11703         memset(&veb_bw, 0, sizeof(veb_bw));
11704         veb_bw.tc_valid_bits = tc_map;
11705         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11706         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11707                 if (tc_map & BIT_ULL(i))
11708                         veb_bw.tc_bw_share_credits[i] = 1;
11709         }
11710         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
11711                                                    &veb_bw, NULL);
11712         if (ret) {
11713                 PMD_INIT_LOG(ERR,
11714                         "AQ command Config switch_comp BW allocation per TC failed = %d",
11715                         hw->aq.asq_last_status);
11716                 return ret;
11717         }
11718
11719         memset(&ets_query, 0, sizeof(ets_query));
11720         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
11721                                                    &ets_query, NULL);
11722         if (ret != I40E_SUCCESS) {
11723                 PMD_DRV_LOG(ERR,
11724                         "Failed to get switch_comp ETS configuration %u",
11725                         hw->aq.asq_last_status);
11726                 return ret;
11727         }
11728         memset(&bw_query, 0, sizeof(bw_query));
11729         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
11730                                                   &bw_query, NULL);
11731         if (ret != I40E_SUCCESS) {
11732                 PMD_DRV_LOG(ERR,
11733                         "Failed to get switch_comp bandwidth configuration %u",
11734                         hw->aq.asq_last_status);
11735                 return ret;
11736         }
11737
11738         /* store and print out BW info */
11739         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
11740         veb->bw_info.bw_max = ets_query.tc_bw_max;
11741         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
11742         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
11743         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
11744                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
11745                      I40E_16_BIT_WIDTH);
11746         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11747                 veb->bw_info.bw_ets_share_credits[i] =
11748                                 bw_query.tc_bw_share_credits[i];
11749                 veb->bw_info.bw_ets_credits[i] =
11750                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11751                 /* 4 bits per TC, 4th bit is reserved */
11752                 veb->bw_info.bw_ets_max[i] =
11753                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11754                                   RTE_LEN2MASK(3, uint8_t));
11755                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11756                             veb->bw_info.bw_ets_share_credits[i]);
11757                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11758                             veb->bw_info.bw_ets_credits[i]);
11759                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11760                             veb->bw_info.bw_ets_max[i]);
11761         }
11762
11763         veb->enabled_tc = tc_map;
11764
11765         return ret;
11766 }
11767
11768
11769 /*
11770  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11771  * @vsi: VSI to be configured
11772  * @tc_map: enabled TC bitmap
11773  *
11774  * Returns 0 on success, negative value on failure
11775  */
11776 static enum i40e_status_code
11777 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11778 {
11779         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11780         struct i40e_vsi_context ctxt;
11781         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11782         enum i40e_status_code ret = I40E_SUCCESS;
11783         int i;
11784
11785         /* Nothing to do if the requested TC map is already enabled */
11786         if (vsi->enabled_tc == tc_map)
11787                 return ret;
11788
11789         /* configure tc bandwidth */
11790         memset(&bw_data, 0, sizeof(bw_data));
11791         bw_data.tc_valid_bits = tc_map;
11792         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11793         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11794                 if (tc_map & BIT_ULL(i))
11795                         bw_data.tc_bw_credits[i] = 1;
11796         }
11797         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11798         if (ret) {
11799                 PMD_INIT_LOG(ERR,
11800                         "AQ command Config VSI BW allocation per TC failed = %d",
11801                         hw->aq.asq_last_status);
11802                 goto out;
11803         }
11804         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11805                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11806
11807         /* Update Queue Pairs Mapping for currently enabled UPs */
11808         ctxt.seid = vsi->seid;
11809         ctxt.pf_num = hw->pf_id;
11810         ctxt.vf_num = 0;
11811         ctxt.uplink_seid = vsi->uplink_seid;
11812         ctxt.info = vsi->info;
11813         i40e_get_cap(hw);
11814         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11815         if (ret)
11816                 goto out;
11817
11818         /* Update the VSI after updating the VSI queue-mapping information */
11819         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11820         if (ret) {
11821                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11822                         hw->aq.asq_last_status);
11823                 goto out;
11824         }
11825         /* update the local VSI info with updated queue map */
11826         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11827                                         sizeof(vsi->info.tc_mapping));
11828         rte_memcpy(&vsi->info.queue_mapping,
11829                         &ctxt.info.queue_mapping,
11830                 sizeof(vsi->info.queue_mapping));
11831         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11832         vsi->info.valid_sections = 0;
11833
11834         /* query and update current VSI BW information */
11835         ret = i40e_vsi_get_bw_config(vsi);
11836         if (ret) {
11837                 PMD_INIT_LOG(ERR,
11838                          "Failed updating vsi bw info, err %s aq_err %s",
11839                          i40e_stat_str(hw, ret),
11840                          i40e_aq_str(hw, hw->aq.asq_last_status));
11841                 goto out;
11842         }
11843
11844         vsi->enabled_tc = tc_map;
11845
11846 out:
11847         return ret;
11848 }
11849
11850 /*
11851  * i40e_dcb_hw_configure - program the dcb setting to hw
11852  * @pf: the PF on which the configuration is applied
11853  * @new_cfg: new configuration
11854  * @tc_map: enabled TC bitmap
11855  *
11856  * Returns 0 on success, negative value on failure
11857  */
11858 static enum i40e_status_code
11859 i40e_dcb_hw_configure(struct i40e_pf *pf,
11860                       struct i40e_dcbx_config *new_cfg,
11861                       uint8_t tc_map)
11862 {
11863         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11864         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11865         struct i40e_vsi *main_vsi = pf->main_vsi;
11866         struct i40e_vsi_list *vsi_list;
11867         enum i40e_status_code ret;
11868         int i;
11869         uint32_t val;
11870
11871         /* Use the FW LLDP API only if FW >= v4.4 */
11872         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11873               (hw->aq.fw_maj_ver >= 5))) {
11874                 PMD_INIT_LOG(ERR,
11875                         "FW < v4.4, can not use FW LLDP API to configure DCB");
11876                 return I40E_ERR_FIRMWARE_API_VERSION;
11877         }
11878
11879         /* Check whether reconfiguration is needed */
11880         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11881                 PMD_INIT_LOG(INFO, "No change in DCB config required.");
11882                 return I40E_SUCCESS;
11883         }
11884
11885         /* Copy the new config to the current config */
11886         *old_cfg = *new_cfg;
11887         old_cfg->etsrec = old_cfg->etscfg;
11888         ret = i40e_set_dcb_config(hw);
11889         if (ret) {
11890                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11891                          i40e_stat_str(hw, ret),
11892                          i40e_aq_str(hw, hw->aq.asq_last_status));
11893                 return ret;
11894         }
11895         /* set receive Arbiter to RR mode and ETS scheme by default */
11896         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11897                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11898                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11899                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11900                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11901                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11902                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11903                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11904                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11905                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11906                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11907                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11908                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11909         }
11910         /* get local mib to check whether it is configured correctly */
11911         /* IEEE mode */
11912         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11913         /* Get Local DCB Config */
11914         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11915                                      &hw->local_dcbx_config);
11916
11917         /* if a VEB has been created, update its TC configuration first */
11918         if (main_vsi->veb) {
11919                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11920                 if (ret)
11921                         PMD_INIT_LOG(WARNING,
11922                                  "Failed configuring TC for VEB seid=%d",
11923                                  main_vsi->veb->seid);
11924         }
11925         /* Update each VSI */
11926         i40e_vsi_config_tc(main_vsi, tc_map);
11927         if (main_vsi->veb) {
11928                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11929                         /* Besides the main VSI and VMDQ VSIs, only enable
11930                          * the default TC for other VSIs
11931                          */
11932                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11933                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11934                                                          tc_map);
11935                         else
11936                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11937                                                          I40E_DEFAULT_TCMAP);
11938                         if (ret)
11939                                 PMD_INIT_LOG(WARNING,
11940                                         "Failed configuring TC for VSI seid=%d",
11941                                         vsi_list->vsi->seid);
11942                         /* continue */
11943                 }
11944         }
11945         return I40E_SUCCESS;
11946 }
11947
11948 /*
11949  * i40e_dcb_init_configure - initial dcb config
11950  * @dev: device being configured
11951  * @sw_dcb: indicates whether DCB is configured in SW or offloaded to HW
11952  *
11953  * Returns 0 on success, negative value on failure
11954  */
11955 int
11956 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11957 {
11958         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11959         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11960         int i, ret = 0;
11961
11962         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11963                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11964                 return -ENOTSUP;
11965         }
11966
11967         /* DCB initialization:
11968          * Update DCB configuration from the Firmware and configure
11969          * LLDP MIB change event.
11970          */
11971         if (sw_dcb == TRUE) {
11972                 /* Stopping LLDP is necessary for DPDK, but doing so makes
11973                  * DCB init fail: for i40e_init_dcb(), the prerequisite
11974                  * for successful DCB initialization is that LLDP is
11975                  * enabled. So LLDP has to be started before DCB init
11976                  * and stopped again after initialization.
11977                  */
11978                 ret = i40e_aq_start_lldp(hw, true, NULL);
11979                 if (ret != I40E_SUCCESS)
11980                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11981
11982                 ret = i40e_init_dcb(hw, true);
11983                 /* If the LLDP agent is stopped, we expect i40e_init_dcb
11984                  * to fail with adminq status I40E_AQ_RC_EPERM.
11985                  * Otherwise, it should return success.
11986                  */
11987                 if (ret == I40E_SUCCESS ||
11988                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
11989                         memset(&hw->local_dcbx_config, 0,
11990                                 sizeof(struct i40e_dcbx_config));
11991                         /* set dcb default configuration */
11992                         hw->local_dcbx_config.etscfg.willing = 0;
11993                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11994                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11995                         hw->local_dcbx_config.etscfg.tsatable[0] =
11996                                                 I40E_IEEE_TSA_ETS;
11997                         /* all UPs mapping to TC0 */
11998                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11999                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
12000                         hw->local_dcbx_config.etsrec =
12001                                 hw->local_dcbx_config.etscfg;
12002                         hw->local_dcbx_config.pfc.willing = 0;
12003                         hw->local_dcbx_config.pfc.pfccap =
12004                                                 I40E_MAX_TRAFFIC_CLASS;
12005                         /* FW needs one App to configure HW */
12006                         hw->local_dcbx_config.numapps = 1;
12007                         hw->local_dcbx_config.app[0].selector =
12008                                                 I40E_APP_SEL_ETHTYPE;
12009                         hw->local_dcbx_config.app[0].priority = 3;
12010                         hw->local_dcbx_config.app[0].protocolid =
12011                                                 I40E_APP_PROTOID_FCOE;
12012                         ret = i40e_set_dcb_config(hw);
12013                         if (ret) {
12014                                 PMD_INIT_LOG(ERR,
12015                                         "default DCB config failed, err = %d, aq_err = %d.",
12016                                         ret, hw->aq.asq_last_status);
12017                                 return -ENOSYS;
12018                         }
12019                 } else {
12020                         PMD_INIT_LOG(ERR,
12021                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
12022                                 ret, hw->aq.asq_last_status);
12023                         return -ENOTSUP;
12024                 }
12025
12026                 if (i40e_need_stop_lldp(dev)) {
12027                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
12028                         if (ret != I40E_SUCCESS)
12029                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
12030                 }
12031         } else {
12032                 ret = i40e_aq_start_lldp(hw, true, NULL);
12033                 if (ret != I40E_SUCCESS)
12034                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
12035
12036                 ret = i40e_init_dcb(hw, true);
12037                 if (!ret) {
12038                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
12039                                 PMD_INIT_LOG(ERR,
12040                                         "HW doesn't support DCBX offload.");
12041                                 return -ENOTSUP;
12042                         }
12043                 } else {
12044                         PMD_INIT_LOG(ERR,
12045                                 "DCBX configuration failed, err = %d, aq_err = %d.",
12046                                 ret, hw->aq.asq_last_status);
12047                         return -ENOTSUP;
12048                 }
12049         }
12050         return 0;
12051 }
12052
12053 /*
12054  * i40e_dcb_setup - setup dcb related config
12055  * @dev: device being configured
12056  *
12057  * Returns 0 on success, negative value on failure
12058  */
12059 static int
12060 i40e_dcb_setup(struct rte_eth_dev *dev)
12061 {
12062         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12063         struct i40e_dcbx_config dcb_cfg;
12064         uint8_t tc_map = 0;
12065         int ret = 0;
12066
12067         if ((pf->flags & I40E_FLAG_DCB) == 0) {
12068                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
12069                 return -ENOTSUP;
12070         }
12071
12072         if (pf->vf_num != 0)
12073                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
12074
12075         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
12076         if (ret) {
12077                 PMD_INIT_LOG(ERR, "invalid dcb config");
12078                 return -EINVAL;
12079         }
12080         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
12081         if (ret) {
12082                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
12083                 return -ENOSYS;
12084         }
12085
12086         return 0;
12087 }
12088
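/*
 * i40e_dev_get_dcb_info - query DCB information (.get_dcb_info callback)
 * @dev: the device being queried
 * @dcb_info: DCB information to be filled in
 *
 * Reports the number of TCs, the priority-to-TC table and the TC
 * bandwidth table from the local DCBX configuration, and decodes the
 * per-TC queue base/count from the tc_mapping words of the main VSI,
 * or of each VMDQ VSI when VMDQ pools are configured.
 *
 * Returns 0 on success
 */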
12089 static int
12090 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
12091                       struct rte_eth_dcb_info *dcb_info)
12092 {
12093         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12094         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12095         struct i40e_vsi *vsi = pf->main_vsi;
12096         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
12097         uint16_t bsf, tc_mapping;
12098         int i, j = 0;
12099
12100         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
12101                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
12102         else
12103                 dcb_info->nb_tcs = 1;
12104         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
12105                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
12106         for (i = 0; i < dcb_info->nb_tcs; i++)
12107                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
12108
12109         /* get queue mapping if vmdq is disabled */
12110         if (!pf->nb_cfg_vmdq_vsi) {
12111                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12112                         if (!(vsi->enabled_tc & (1 << i)))
12113                                 continue;
12114                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
12115                         dcb_info->tc_queue.tc_rxq[j][i].base =
12116                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12117                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12118                         dcb_info->tc_queue.tc_txq[j][i].base =
12119                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12120                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12121                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12122                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12123                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12124                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12125                 }
12126                 return 0;
12127         }
12128
12129         /* get queue mapping if vmdq is enabled */
12130         do {
12131                 vsi = pf->vmdq[j].vsi;
12132                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12133                         if (!(vsi->enabled_tc & (1 << i)))
12134                                 continue;
12135                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
12136                         dcb_info->tc_queue.tc_rxq[j][i].base =
12137                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12138                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12139                         dcb_info->tc_queue.tc_txq[j][i].base =
12140                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12141                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12142                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12143                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12144                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12145                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12146                 }
12147                 j++;
12148         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
12149         return 0;
12150 }
12151
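/*
 * i40e_dev_rx_queue_intr_enable - enable the interrupt of an Rx queue
 * @dev: the device owning the queue
 * @queue_id: Rx queue index
 *
 * Re-arms the MSI-X vector mapped to the queue by setting INTENA (plus
 * CLEARPBA) in the corresponding dynamic interrupt control register and
 * acks the interrupt so that a new one can be raised.
 *
 * Returns 0 on success
 */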
12152 static int
12153 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
12154 {
12155         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12156         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12157         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12158         uint16_t msix_intr;
12159
12160         msix_intr = intr_handle->intr_vec[queue_id];
12161         if (msix_intr == I40E_MISC_VEC_ID)
12162                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12163                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
12164                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
12165                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12166         else
12167                 I40E_WRITE_REG(hw,
12168                                I40E_PFINT_DYN_CTLN(msix_intr -
12169                                                    I40E_RX_VEC_START),
12170                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
12171                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
12172                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12173
12174         I40E_WRITE_FLUSH(hw);
12175         rte_intr_ack(&pci_dev->intr_handle);
12176
12177         return 0;
12178 }
12179
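/*
 * i40e_dev_rx_queue_intr_disable - disable the interrupt of an Rx queue
 * @dev: the device owning the queue
 * @queue_id: Rx queue index
 *
 * Writes the dynamic interrupt control register of the mapped MSI-X
 * vector without INTENA set, masking further interrupts from the queue.
 *
 * Returns 0 on success
 */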
12180 static int
12181 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
12182 {
12183         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12184         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12185         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12186         uint16_t msix_intr;
12187
12188         msix_intr = intr_handle->intr_vec[queue_id];
12189         if (msix_intr == I40E_MISC_VEC_ID)
12190                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12191                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12192         else
12193                 I40E_WRITE_REG(hw,
12194                                I40E_PFINT_DYN_CTLN(msix_intr -
12195                                                    I40E_RX_VEC_START),
12196                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12197         I40E_WRITE_FLUSH(hw);
12198
12199         return 0;
12200 }
12201
12202 /**
12203  * Check whether a register offset is valid to access.
12204  * The register ranges below are valid on X722 only:
12205  * 0x2b800--0x2bb00
12206  * 0x38700--0x38a00
12207  * 0x3d800--0x3db00
12208  * 0x208e00--0x209000
12209  * 0x20be00--0x20c000
12210  * 0x263c00--0x264000
12211  * 0x265c00--0x266000
12212  */
12213 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
12214 {
12215         if ((type != I40E_MAC_X722) &&
12216             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
12217              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
12218              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
12219              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
12220              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
12221              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
12222              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
12223                 return 0;
12224         else
12225                 return 1;
12226 }
12227
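/*
 * i40e_get_regs - dump device registers (.get_reg callback)
 *
 * Supports the usual two-pass usage of rte_eth_dev_get_reg_info(): when
 * regs->data is NULL only the required buffer length and register width
 * are reported, otherwise both register tables are walked. Entries from
 * i40e_regs_adminq must be read through i40e_read_rx_ctl(); the rest are
 * read directly, and offsets invalid for the MAC type read back as 0.
 */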
12228 static int i40e_get_regs(struct rte_eth_dev *dev,
12229                          struct rte_dev_reg_info *regs)
12230 {
12231         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12232         uint32_t *ptr_data = regs->data;
12233         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
12234         const struct i40e_reg_info *reg_info;
12235
12236         if (ptr_data == NULL) {
12237                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
12238                 regs->width = sizeof(uint32_t);
12239                 return 0;
12240         }
12241
12242         /* The first few registers have to be read using AQ operations */
12243         reg_idx = 0;
12244         while (i40e_regs_adminq[reg_idx].name) {
12245                 reg_info = &i40e_regs_adminq[reg_idx++];
12246                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12247                         for (arr_idx2 = 0;
12248                                         arr_idx2 <= reg_info->count2;
12249                                         arr_idx2++) {
12250                                 reg_offset = arr_idx * reg_info->stride1 +
12251                                         arr_idx2 * reg_info->stride2;
12252                                 reg_offset += reg_info->base_addr;
12253                                 ptr_data[reg_offset >> 2] =
12254                                         i40e_read_rx_ctl(hw, reg_offset);
12255                         }
12256         }
12257
12258         /* The remaining registers can be read using primitives */
12259         reg_idx = 0;
12260         while (i40e_regs_others[reg_idx].name) {
12261                 reg_info = &i40e_regs_others[reg_idx++];
12262                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12263                         for (arr_idx2 = 0;
12264                                         arr_idx2 <= reg_info->count2;
12265                                         arr_idx2++) {
12266                                 reg_offset = arr_idx * reg_info->stride1 +
12267                                         arr_idx2 * reg_info->stride2;
12268                                 reg_offset += reg_info->base_addr;
12269                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
12270                                         ptr_data[reg_offset >> 2] = 0;
12271                                 else
12272                                         ptr_data[reg_offset >> 2] =
12273                                                 I40E_READ_REG(hw, reg_offset);
12274                         }
12275         }
12276
12277         return 0;
12278 }
12279
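/* Report the NVM (shadow RAM) size in bytes; sr_size counts 16-bit words */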
12280 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
12281 {
12282         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12283
12284         /* Convert word count to byte count */
12285         return hw->nvm.sr_size << 1;
12286 }
12287
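/*
 * i40e_get_eeprom - read NVM contents (.get_eeprom callback)
 *
 * The NVM is addressed in 16-bit words, so the byte offset and length of
 * the request are converted to words before calling i40e_read_nvm_buffer().
 *
 * Returns 0 on success, negative value on failure
 */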
12288 static int i40e_get_eeprom(struct rte_eth_dev *dev,
12289                            struct rte_dev_eeprom_info *eeprom)
12290 {
12291         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12292         uint16_t *data = eeprom->data;
12293         uint16_t offset, length, cnt_words;
12294         int ret_code;
12295
12296         offset = eeprom->offset >> 1;
12297         length = eeprom->length >> 1;
12298         cnt_words = length;
12299
12300         if (offset > hw->nvm.sr_size ||
12301                 offset + length > hw->nvm.sr_size) {
12302                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
12303                 return -EINVAL;
12304         }
12305
12306         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
12307
12308         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
12309         if (ret_code != I40E_SUCCESS || cnt_words != length) {
12310                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
12311                 return -EIO;
12312         }
12313
12314         return 0;
12315 }
12316
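/*
 * i40e_get_module_info - report plugged module type and EEPROM length
 *
 * Identifies the module from the PHY link info and probes its compliance
 * bytes through the AQ PHY register interface: SFP modules are reported
 * as SFF-8079 or SFF-8472 (depending on the SFF-8472 compliance byte and
 * address-swap support), QSFP+ as SFF-8436 or SFF-8636 depending on the
 * revision compliance byte, and QSFP28 always as SFF-8636.
 *
 * Returns 0 on success, negative value on failure
 */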
12317 static int i40e_get_module_info(struct rte_eth_dev *dev,
12318                                 struct rte_eth_dev_module_info *modinfo)
12319 {
12320         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12321         uint32_t sff8472_comp = 0;
12322         uint32_t sff8472_swap = 0;
12323         uint32_t sff8636_rev = 0;
12324         i40e_status status;
12325         uint32_t type = 0;
12326
12327         /* Check if firmware supports reading module EEPROM. */
12328         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
12329                 PMD_DRV_LOG(ERR,
12330                             "Module EEPROM memory read not supported. "
12331                             "Please update the NVM image.");
12332                 return -EINVAL;
12333         }
12334
12335         status = i40e_update_link_info(hw);
12336         if (status)
12337                 return -EIO;
12338
12339         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
12340                 PMD_DRV_LOG(ERR,
12341                             "Cannot read module EEPROM memory. "
12342                             "No module connected.");
12343                 return -EINVAL;
12344         }
12345
12346         type = hw->phy.link_info.module_type[0];
12347
12348         switch (type) {
12349         case I40E_MODULE_TYPE_SFP:
12350                 status = i40e_aq_get_phy_register(hw,
12351                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12352                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12353                                 I40E_MODULE_SFF_8472_COMP,
12354                                 &sff8472_comp, NULL);
12355                 if (status)
12356                         return -EIO;
12357
12358                 status = i40e_aq_get_phy_register(hw,
12359                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12360                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12361                                 I40E_MODULE_SFF_8472_SWAP,
12362                                 &sff8472_swap, NULL);
12363                 if (status)
12364                         return -EIO;
12365
12366                 /* Check if the module requires address swap to access
12367                  * the other EEPROM memory page.
12368                  */
12369                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
12370                         PMD_DRV_LOG(WARNING,
12371                                     "Module address swap to access "
12372                                     "page 0xA2 is not supported.");
12373                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12374                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12375                 } else if (sff8472_comp == 0x00) {
12376                         /* Module is not SFF-8472 compliant */
12377                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12378                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12379                 } else {
12380                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
12381                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
12382                 }
12383                 break;
12384         case I40E_MODULE_TYPE_QSFP_PLUS:
12385                 /* Read from memory page 0. */
12386                 status = i40e_aq_get_phy_register(hw,
12387                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12388                                 0, 1,
12389                                 I40E_MODULE_REVISION_ADDR,
12390                                 &sff8636_rev, NULL);
12391                 if (status)
12392                         return -EIO;
12393                 /* Determine revision compliance byte */
12394                 if (sff8636_rev > 0x02) {
12395                         /* Module is SFF-8636 compliant */
12396                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
12397                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12398                 } else {
12399                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
12400                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12401                 }
12402                 break;
12403         case I40E_MODULE_TYPE_QSFP28:
12404                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
12405                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12406                 break;
12407         default:
12408                 PMD_DRV_LOG(ERR, "Module type unrecognized");
12409                 return -EINVAL;
12410         }
12411         return 0;
12412 }
12413
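/*
 * i40e_get_module_eeprom - read module EEPROM contents byte by byte
 *
 * For SFP modules, offsets beyond the 256-byte SFF-8079 page are
 * redirected to the second I2C device address (the 0xA2 page). For QSFP
 * modules, the flat offset is folded into a 128-byte upper page number
 * plus offset before each AQ PHY register read.
 *
 * Returns 0 on success, negative value on failure
 */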
12414 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
12415                                   struct rte_dev_eeprom_info *info)
12416 {
12417         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12418         bool is_sfp = false;
12419         i40e_status status;
12420         uint8_t *data;
12421         uint32_t value = 0;
12422         uint32_t i;
12423
12424         if (!info || !info->length || !info->data)
12425                 return -EINVAL;
12426
12427         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
12428                 is_sfp = true;
12429
12430         data = info->data;
12431         for (i = 0; i < info->length; i++) {
12432                 u32 offset = i + info->offset;
12433                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
12434
12435                 /* Check if we need to access the other memory page */
12436                 if (is_sfp) {
12437                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
12438                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
12439                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
12440                         }
12441                 } else {
12442                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
12443                                 /* Compute memory page number and offset. */
12444                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
12445                                 addr++;
12446                         }
12447                 }
12448                 status = i40e_aq_get_phy_register(hw,
12449                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12450                                 addr, 1, offset, &value, NULL);
12451                 if (status)
12452                         return -EIO;
12453                 data[i] = (uint8_t)value;
12454         }
12455         return 0;
12456 }
12457
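/*
 * i40e_set_default_mac_addr - set the default MAC address (.mac_addr_set)
 *
 * Swaps the MAC filter of the current default address for one matching
 * the new address, then writes the new address to the HW port via the
 * admin queue (LAA + wake-on-LAN write type).
 *
 * Returns 0 on success, negative value on failure
 */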
12458 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
12459                                      struct rte_ether_addr *mac_addr)
12460 {
12461         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12462         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12463         struct i40e_vsi *vsi = pf->main_vsi;
12464         struct i40e_mac_filter_info mac_filter;
12465         struct i40e_mac_filter *f;
12466         int ret;
12467
12468         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
12469                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
12470                 return -EINVAL;
12471         }
12472
12473         TAILQ_FOREACH(f, &vsi->mac_list, next) {
12474                 if (rte_is_same_ether_addr(&pf->dev_addr,
12475                                                 &f->mac_info.mac_addr))
12476                         break;
12477         }
12478
12479         if (f == NULL) {
12480                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
12481                 return -EIO;
12482         }
12483
12484         mac_filter = f->mac_info;
12485         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
12486         if (ret != I40E_SUCCESS) {
12487                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
12488                 return -EIO;
12489         }
12490         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
12491         ret = i40e_vsi_add_mac(vsi, &mac_filter);
12492         if (ret != I40E_SUCCESS) {
12493                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
12494                 return -EIO;
12495         }
12496         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
12497
12498         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
12499                                         mac_addr->addr_bytes, NULL);
12500         if (ret != I40E_SUCCESS) {
12501                 PMD_DRV_LOG(ERR, "Failed to change mac");
12502                 return -EIO;
12503         }
12504
12505         return 0;
12506 }
12507
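/*
 * i40e_dev_mtu_set - set device MTU (.mtu_set callback)
 *
 * The port must be stopped first; frame sizes above RTE_ETHER_MAX_LEN
 * additionally enable the jumbo frame Rx offload. A minimal usage sketch
 * from an application (port_id and the 9000-byte MTU are illustrative):
 *
 *   rte_eth_dev_stop(port_id);
 *   if (rte_eth_dev_set_mtu(port_id, 9000) == 0)
 *           rte_eth_dev_start(port_id);
 *
 * Returns 0 on success, negative value on failure
 */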
12508 static int
12509 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
12510 {
12511         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12512         struct rte_eth_dev_data *dev_data = pf->dev_data;
12513         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
12514         int ret = 0;
12515
12516         /* check if mtu is within the allowed range */
12517         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
12518                 return -EINVAL;
12519
12520         /* MTU setting is forbidden while the port is started */
12521         if (dev_data->dev_started) {
12522                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
12523                             dev_data->port_id);
12524                 return -EBUSY;
12525         }
12526
12527         if (frame_size > RTE_ETHER_MAX_LEN)
12528                 dev_data->dev_conf.rxmode.offloads |=
12529                         DEV_RX_OFFLOAD_JUMBO_FRAME;
12530         else
12531                 dev_data->dev_conf.rxmode.offloads &=
12532                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
12533
12534         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
12535
12536         return ret;
12537 }
12538
12539 /* Restore ethertype filter */
12540 static void
12541 i40e_ethertype_filter_restore(struct i40e_pf *pf)
12542 {
12543         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12544         struct i40e_ethertype_filter_list
12545                 *ethertype_list = &pf->ethertype.ethertype_list;
12546         struct i40e_ethertype_filter *f;
12547         struct i40e_control_filter_stats stats = {0}; /* logged below even if list is empty */
12548         uint16_t flags;
12549
12550         TAILQ_FOREACH(f, ethertype_list, rules) {
12551                 flags = 0;
12552                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
12553                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
12554                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
12555                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
12556                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
12557
12558                 memset(&stats, 0, sizeof(stats));
12559                 i40e_aq_add_rem_control_packet_filter(hw,
12560                                             f->input.mac_addr.addr_bytes,
12561                                             f->input.ether_type,
12562                                             flags, pf->main_vsi->seid,
12563                                             f->queue, 1, &stats, NULL);
12564         }
12565         PMD_DRV_LOG(INFO, "Ethertype filter:"
12566                     " mac_etype_used = %u, etype_used = %u,"
12567                     " mac_etype_free = %u, etype_free = %u",
12568                     stats.mac_etype_used, stats.etype_used,
12569                     stats.mac_etype_free, stats.etype_free);
12570 }
12571
12572 /* Restore tunnel filter */
12573 static void
12574 i40e_tunnel_filter_restore(struct i40e_pf *pf)
12575 {
12576         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12577         struct i40e_vsi *vsi;
12578         struct i40e_pf_vf *vf;
12579         struct i40e_tunnel_filter_list
12580                 *tunnel_list = &pf->tunnel.tunnel_list;
12581         struct i40e_tunnel_filter *f;
12582         struct i40e_aqc_cloud_filters_element_bb cld_filter;
12583         bool big_buffer = false;
12584
12585         TAILQ_FOREACH(f, tunnel_list, rules) {
12586                 if (!f->is_to_vf)
12587                         vsi = pf->main_vsi;
12588                 else {
12589                         vf = &pf->vfs[f->vf_id];
12590                         vsi = vf->vsi;
12591                 }
12592                 memset(&cld_filter, 0, sizeof(cld_filter));
12593                 rte_ether_addr_copy((struct rte_ether_addr *)
12594                                 &f->input.outer_mac,
12595                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
12596                 rte_ether_addr_copy((struct rte_ether_addr *)
12597                                 &f->input.inner_mac,
12598                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
12599                 cld_filter.element.inner_vlan = f->input.inner_vlan;
12600                 cld_filter.element.flags = f->input.flags;
12601                 cld_filter.element.tenant_id = f->input.tenant_id;
12602                 cld_filter.element.queue_number = f->queue;
12603                 rte_memcpy(cld_filter.general_fields,
12604                            f->input.general_fields,
12605                            sizeof(f->input.general_fields));
12606                 big_buffer = false; /* reset for each filter */
12607                 if (((f->input.flags &
12608                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
12609                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
12610                     ((f->input.flags &
12611                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
12612                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
12613                     ((f->input.flags &
12614                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
12615                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
12616                         big_buffer = 1;
12617
12618                 if (big_buffer)
12619                         i40e_aq_add_cloud_filters_bb(hw,
12620                                         vsi->seid, &cld_filter, 1);
12621                 else
12622                         i40e_aq_add_cloud_filters(hw, vsi->seid,
12623                                                   &cld_filter.element, 1);
12624         }
12625 }
12626
12627 /* Restore RSS filter */
12628 static inline void
12629 i40e_rss_filter_restore(struct i40e_pf *pf)
12630 {
12631         struct i40e_rss_conf_list *list = &pf->rss_config_list;
12632         struct i40e_rss_filter *filter;
12633
12634         TAILQ_FOREACH(filter, list, next) {
12635                 i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
12636         }
12637 }
12638
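/* Replay all SW-tracked filters (ethertype, tunnel, FDIR, RSS) into HW */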
12639 static void
12640 i40e_filter_restore(struct i40e_pf *pf)
12641 {
12642         i40e_ethertype_filter_restore(pf);
12643         i40e_tunnel_filter_restore(pf);
12644         i40e_fdir_filter_restore(pf);
12645         i40e_rss_filter_restore(pf);
12646 }
12647
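/* Check whether the ethdev is bound to the given PCI driver, by name */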
12648 bool
12649 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
12650 {
12651         if (strcmp(dev->device->driver->name, drv->driver.name))
12652                 return false;
12653
12654         return true;
12655 }
12656
12657 bool
12658 is_i40e_supported(struct rte_eth_dev *dev)
12659 {
12660         return is_device_supported(dev, &rte_i40e_pmd);
12661 }
12662
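/* Look up a customized (DDP-defined) pctype entry by its fixed index */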
12663 struct i40e_customized_pctype*
12664 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
12665 {
12666         int i;
12667
12668         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
12669                 if (pf->customized_pctype[i].index == index)
12670                         return &pf->customized_pctype[i];
12671         }
12672         return NULL;
12673 }
12674
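/*
 * i40e_update_customized_pctype - sync customized pctypes with a DDP package
 *
 * Reads the pctype list of the package, reconstructs each pctype's name
 * from its protocol chain and matches it against the known customized
 * pctypes (GTP, L2TPv3, ESP and AH variants). A matched entry is marked
 * valid with its HW pctype value on package add, or invalidated on delete.
 *
 * Returns 0 on success, negative value on failure
 */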
12675 static int
12676 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
12677                               uint32_t pkg_size, uint32_t proto_num,
12678                               struct rte_pmd_i40e_proto_info *proto,
12679                               enum rte_pmd_i40e_package_op op)
12680 {
12681         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12682         uint32_t pctype_num;
12683         struct rte_pmd_i40e_ptype_info *pctype;
12684         uint32_t buff_size;
12685         struct i40e_customized_pctype *new_pctype = NULL;
12686         uint8_t proto_id;
12687         uint8_t pctype_value;
12688         char name[64];
12689         uint32_t i, j, n;
12690         int ret;
12691
12692         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12693             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12694                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12695                 return -1;
12696         }
12697
12698         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12699                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
12700                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
12701         if (ret) {
12702                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
12703                 return -1;
12704         }
12705         if (!pctype_num) {
12706                 PMD_DRV_LOG(INFO, "No new pctype added");
12707                 return -1;
12708         }
12709
12710         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12711         pctype = rte_zmalloc("new_pctype", buff_size, 0);
12712         if (!pctype) {
12713                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12714                 return -1;
12715         }
12716         /* get information about new pctype list */
12717         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12718                                         (uint8_t *)pctype, buff_size,
12719                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
12720         if (ret) {
12721                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
12722                 rte_free(pctype);
12723                 return -1;
12724         }
12725
12726         /* Update customized pctype. */
12727         for (i = 0; i < pctype_num; i++) {
12728                 pctype_value = pctype[i].ptype_id;
12729                 memset(name, 0, sizeof(name));
12730                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12731                         proto_id = pctype[i].protocols[j];
12732                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12733                                 continue;
12734                         for (n = 0; n < proto_num; n++) {
12735                                 if (proto[n].proto_id != proto_id)
12736                                         continue;
12737                                 strlcat(name, proto[n].name, sizeof(name));
12738                                 strlcat(name, "_", sizeof(name));
12739                                 break;
12740                         }
12741                 }
                /* Strip the trailing '_'; guard against an empty name */
                if (name[0] != '\0')
12742                         name[strlen(name) - 1] = '\0';
12743                 PMD_DRV_LOG(INFO, "name = %s", name);
12744                 if (!strcmp(name, "GTPC"))
12745                         new_pctype =
12746                                 i40e_find_customized_pctype(pf,
12747                                                       I40E_CUSTOMIZED_GTPC);
12748                 else if (!strcmp(name, "GTPU_IPV4"))
12749                         new_pctype =
12750                                 i40e_find_customized_pctype(pf,
12751                                                    I40E_CUSTOMIZED_GTPU_IPV4);
12752                 else if (!strcmp(name, "GTPU_IPV6"))
12753                         new_pctype =
12754                                 i40e_find_customized_pctype(pf,
12755                                                    I40E_CUSTOMIZED_GTPU_IPV6);
12756                 else if (!strcmp(name, "GTPU"))
12757                         new_pctype =
12758                                 i40e_find_customized_pctype(pf,
12759                                                       I40E_CUSTOMIZED_GTPU);
12760                 else if (!strcmp(name, "IPV4_L2TPV3"))
12761                         new_pctype =
12762                                 i40e_find_customized_pctype(pf,
12763                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
12764                 else if (!strcmp(name, "IPV6_L2TPV3"))
12765                         new_pctype =
12766                                 i40e_find_customized_pctype(pf,
12767                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
12768                 else if (!strcmp(name, "IPV4_ESP"))
12769                         new_pctype =
12770                                 i40e_find_customized_pctype(pf,
12771                                                 I40E_CUSTOMIZED_ESP_IPV4);
12772                 else if (!strcmp(name, "IPV6_ESP"))
12773                         new_pctype =
12774                                 i40e_find_customized_pctype(pf,
12775                                                 I40E_CUSTOMIZED_ESP_IPV6);
12776                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12777                         new_pctype =
12778                                 i40e_find_customized_pctype(pf,
12779                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12780                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12781                         new_pctype =
12782                                 i40e_find_customized_pctype(pf,
12783                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12784                 else if (!strcmp(name, "IPV4_AH"))
12785                         new_pctype =
12786                                 i40e_find_customized_pctype(pf,
12787                                                 I40E_CUSTOMIZED_AH_IPV4);
12788                 else if (!strcmp(name, "IPV6_AH"))
12789                         new_pctype =
12790                                 i40e_find_customized_pctype(pf,
12791                                                 I40E_CUSTOMIZED_AH_IPV6);
12792                 if (new_pctype) {
12793                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12794                                 new_pctype->pctype = pctype_value;
12795                                 new_pctype->valid = true;
12796                         } else {
12797                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12798                                 new_pctype->valid = false;
12799                         }
12800                 }
12801         }
12802
12803         rte_free(pctype);
12804         return 0;
12805 }
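
/* Illustrative caller sketch (hypothetical application code, not part of
 * the driver): the pctype table above is rewritten when an application
 * loads or removes a DDP profile through the rte_pmd_i40e API. "pkg_buf"
 * and "pkg_len" are assumed to hold a profile image read from disk.
 *
 *     int ret = rte_pmd_i40e_process_ddp_package(port_id, pkg_buf,
 *                     pkg_len, RTE_PMD_I40E_PKG_OP_WR_ADD);
 *     if (ret < 0)
 *             printf("DDP profile load failed: %d\n", ret);
 */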
12806
12807 static int
12808 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12809                              uint32_t pkg_size, uint32_t proto_num,
12810                              struct rte_pmd_i40e_proto_info *proto,
12811                              enum rte_pmd_i40e_package_op op)
12812 {
12813         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12814         uint16_t port_id = dev->data->port_id;
12815         uint32_t ptype_num;
12816         struct rte_pmd_i40e_ptype_info *ptype;
12817         uint32_t buff_size;
12818         uint8_t proto_id;
12819         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12820         uint32_t i, j, n;
12821         bool in_tunnel;
12822         int ret;
12823
12824         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12825             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12826                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12827                 return -1;
12828         }
12829
12830         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12831                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12832                 return 0;
12833         }
12834
12835         /* get information about new ptype num */
12836         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12837                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12838                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12839         if (ret) {
12840                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12841                 return ret;
12842         }
12843         if (!ptype_num) {
12844                 PMD_DRV_LOG(INFO, "No new ptype added");
12845                 return -1;
12846         }
12847
12848         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12849         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12850         if (!ptype) {
12851                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12852                 return -1;
12853         }
12854
12855         /* get information about new ptype list */
12856         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12857                                         (uint8_t *)ptype, buff_size,
12858                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12859         if (ret) {
12860                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12861                 rte_free(ptype);
12862                 return ret;
12863         }
12864
12865         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12866         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12867         if (!ptype_mapping) {
12868                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12869                 rte_free(ptype);
12870                 return -1;
12871         }
12872
12873         /* Update ptype mapping table. */
12874         for (i = 0; i < ptype_num; i++) {
12875                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12876                 ptype_mapping[i].sw_ptype = 0;
12877                 in_tunnel = false;
12878                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12879                         proto_id = ptype[i].protocols[j];
12880                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12881                                 continue;
12882                         for (n = 0; n < proto_num; n++) {
12883                                 if (proto[n].proto_id != proto_id)
12884                                         continue;
12885                                 memset(name, 0, sizeof(name));
12886                                 strlcpy(name, proto[n].name, sizeof(name));
12887                                 PMD_DRV_LOG(INFO, "name = %s", name);
12888                                 if (!strncasecmp(name, "PPPOE", 5))
12889                                         ptype_mapping[i].sw_ptype |=
12890                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12891                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12892                                          !in_tunnel) {
12893                                         ptype_mapping[i].sw_ptype |=
12894                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12895                                         ptype_mapping[i].sw_ptype |=
12896                                                 RTE_PTYPE_L4_FRAG;
12897                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12898                                            in_tunnel) {
12899                                         ptype_mapping[i].sw_ptype |=
12900                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12901                                         ptype_mapping[i].sw_ptype |=
12902                                                 RTE_PTYPE_INNER_L4_FRAG;
12903                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12904                                         ptype_mapping[i].sw_ptype |=
12905                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12906                                         in_tunnel = true;
12907                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12908                                            !in_tunnel)
12909                                         ptype_mapping[i].sw_ptype |=
12910                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12911                                 else if (!strncasecmp(name, "IPV4", 4) &&
12912                                          in_tunnel)
12913                                         ptype_mapping[i].sw_ptype |=
12914                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12915                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12916                                          !in_tunnel) {
12917                                         ptype_mapping[i].sw_ptype |=
12918                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12919                                         ptype_mapping[i].sw_ptype |=
12920                                                 RTE_PTYPE_L4_FRAG;
12921                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12922                                            in_tunnel) {
12923                                         ptype_mapping[i].sw_ptype |=
12924                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12925                                         ptype_mapping[i].sw_ptype |=
12926                                                 RTE_PTYPE_INNER_L4_FRAG;
12927                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12928                                         ptype_mapping[i].sw_ptype |=
12929                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12930                                         in_tunnel = true;
12931                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12932                                            !in_tunnel)
12933                                         ptype_mapping[i].sw_ptype |=
12934                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12935                                 else if (!strncasecmp(name, "IPV6", 4) &&
12936                                          in_tunnel)
12937                                         ptype_mapping[i].sw_ptype |=
12938                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12939                                 else if (!strncasecmp(name, "UDP", 3) &&
12940                                          !in_tunnel)
12941                                         ptype_mapping[i].sw_ptype |=
12942                                                 RTE_PTYPE_L4_UDP;
12943                                 else if (!strncasecmp(name, "UDP", 3) &&
12944                                          in_tunnel)
12945                                         ptype_mapping[i].sw_ptype |=
12946                                                 RTE_PTYPE_INNER_L4_UDP;
12947                                 else if (!strncasecmp(name, "TCP", 3) &&
12948                                          !in_tunnel)
12949                                         ptype_mapping[i].sw_ptype |=
12950                                                 RTE_PTYPE_L4_TCP;
12951                                 else if (!strncasecmp(name, "TCP", 3) &&
12952                                          in_tunnel)
12953                                         ptype_mapping[i].sw_ptype |=
12954                                                 RTE_PTYPE_INNER_L4_TCP;
12955                                 else if (!strncasecmp(name, "SCTP", 4) &&
12956                                          !in_tunnel)
12957                                         ptype_mapping[i].sw_ptype |=
12958                                                 RTE_PTYPE_L4_SCTP;
12959                                 else if (!strncasecmp(name, "SCTP", 4) &&
12960                                          in_tunnel)
12961                                         ptype_mapping[i].sw_ptype |=
12962                                                 RTE_PTYPE_INNER_L4_SCTP;
12963                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12964                                           !strncasecmp(name, "ICMPV6", 6)) &&
12965                                          !in_tunnel)
12966                                         ptype_mapping[i].sw_ptype |=
12967                                                 RTE_PTYPE_L4_ICMP;
12968                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12969                                           !strncasecmp(name, "ICMPV6", 6)) &&
12970                                          in_tunnel)
12971                                         ptype_mapping[i].sw_ptype |=
12972                                                 RTE_PTYPE_INNER_L4_ICMP;
12973                                 else if (!strncasecmp(name, "GTPC", 4)) {
12974                                         ptype_mapping[i].sw_ptype |=
12975                                                 RTE_PTYPE_TUNNEL_GTPC;
12976                                         in_tunnel = true;
12977                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12978                                         ptype_mapping[i].sw_ptype |=
12979                                                 RTE_PTYPE_TUNNEL_GTPU;
12980                                         in_tunnel = true;
12981                                 } else if (!strncasecmp(name, "ESP", 3)) {
12982                                         ptype_mapping[i].sw_ptype |=
12983                                                 RTE_PTYPE_TUNNEL_ESP;
12984                                         in_tunnel = true;
12985                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12986                                         ptype_mapping[i].sw_ptype |=
12987                                                 RTE_PTYPE_TUNNEL_GRENAT;
12988                                         in_tunnel = true;
12989                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12990                                            !strncasecmp(name, "L2TPV2", 6) ||
12991                                            !strncasecmp(name, "L2TPV3", 6)) {
12992                                         ptype_mapping[i].sw_ptype |=
12993                                                 RTE_PTYPE_TUNNEL_L2TP;
12994                                         in_tunnel = true;
12995                                 }
12996
12997                                 break;
12998                         }
12999                 }
13000         }
13001
13002         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
13003                                                 ptype_num, 0);
13004         if (ret)
13005                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
13006
13007         rte_free(ptype_mapping);
13008         rte_free(ptype);
13009         return ret;
13010 }
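
/* Worked example (hypothetical protocol chain from a DDP profile): for a
 * hardware ptype whose protocol list decodes as OIPV4, GTPU, IPV6, UDP,
 * the loop above accumulates
 *
 *     OIPV4 -> RTE_PTYPE_L3_IPV4_EXT_UNKNOWN   (and in_tunnel = true)
 *     GTPU  -> RTE_PTYPE_TUNNEL_GTPU
 *     IPV6  -> RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN
 *     UDP   -> RTE_PTYPE_INNER_L4_UDP
 *
 * so sw_ptype becomes the bitwise OR of those four flags.
 */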
13011
13012 void
13013 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
13014                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
13015 {
13016         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
13017         uint32_t proto_num;
13018         struct rte_pmd_i40e_proto_info *proto;
13019         uint32_t buff_size;
13020         uint32_t i;
13021         int ret;
13022
13023         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
13024             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
13025                 PMD_DRV_LOG(ERR, "Unsupported operation.");
13026                 return;
13027         }
13028
13029         /* get information about protocol number */
13030         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
13031                                        (uint8_t *)&proto_num, sizeof(proto_num),
13032                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
13033         if (ret) {
13034                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
13035                 return;
13036         }
13037         if (!proto_num) {
13038                 PMD_DRV_LOG(INFO, "No new protocol added");
13039                 return;
13040         }
13041
13042         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
13043         proto = rte_zmalloc("new_proto", buff_size, 0);
13044         if (!proto) {
13045                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
13046                 return;
13047         }
13048
13049         /* get information about protocol list */
13050         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
13051                                         (uint8_t *)proto, buff_size,
13052                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
13053         if (ret) {
13054                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
13055                 rte_free(proto);
13056                 return;
13057         }
13058
13059         /* Check if GTP is supported. */
13060         for (i = 0; i < proto_num; i++) {
13061                 if (!strncmp(proto[i].name, "GTP", 3)) {
13062                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
13063                                 pf->gtp_support = true;
13064                         else
13065                                 pf->gtp_support = false;
13066                         break;
13067                 }
13068         }
13069
13070         /* Check if ESP is supported. */
13071         for (i = 0; i < proto_num; i++) {
13072                 if (!strncmp(proto[i].name, "ESP", 3)) {
13073                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
13074                                 pf->esp_support = true;
13075                         else
13076                                 pf->esp_support = false;
13077                         break;
13078                 }
13079         }
13080
13081         /* Update customized pctype info */
13082         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
13083                                             proto_num, proto, op);
13084         if (ret)
13085                 PMD_DRV_LOG(INFO, "No pctype is updated.");
13086
13087         /* Update customized ptype info */
13088         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
13089                                            proto_num, proto, op);
13090         if (ret)
13091                 PMD_DRV_LOG(INFO, "No ptype is updated.");
13092
13093         rte_free(proto);
13094 }
13095
13096 /* Create a QinQ cloud filter
13097  *
13098  * The Fortville NIC has limited resources for tunnel filters,
13099  * so we can only reuse existing filters.
13100  *
13101  * In step 1 we define which Field Vector fields can be used for
13102  * filter types.
13103  * As we do not have the inner tag defined as a field,
13104  * we have to define it first, by reusing one of the L1 entries.
13105  *
13106  * In step 2 we are replacing one of the existing filter types with
13107  * a new one for QinQ.
13108  * As we are reusing L1 and replacing L2, some of the default filter
13109  * types will disappear, which depends on the L1 and L2 entries we reuse.
13110  *
13111  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
13112  *
13113  * 1.   Create L1 filter of outer vlan (12b) which will be in use
13114  *              later when we define the cloud filter.
13115  *      a.      Valid_flags.replace_cloud = 0
13116  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
13117  *      c.      New_filter = 0x10
13118  *      d.      TR bit = 0xff (optional, not used here)
13119  *      e.      Buffer – 2 entries:
13120  *              i.      Byte 0 = 8 (outer vlan FV index).
13121  *                      Byte 1 = 0 (rsv)
13122  *                      Byte 2-3 = 0x0fff
13123  *              ii.     Byte 0 = 37 (inner vlan FV index).
13124  *                      Byte 1 = 0 (rsv)
13125  *                      Byte 2-3 = 0x0fff
13126  *
13127  * Step 2:
13128  * 2.   Create cloud filter using two L1 filter entries: stag and
13129  *              new filter (outer vlan + inner vlan)
13130  *      a.      Valid_flags.replace_cloud = 1
13131  *      b.      Old_filter = 1 (instead of outer IP)
13132  *      c.      New_filter = 0x10
13133  *      d.      Buffer – 2 entries:
13134  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
13135  *                      Byte 1-3 = 0 (rsv)
13136  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
13137  *                      Byte 9-11 = 0 (rsv)
13138  */
13139 static int
13140 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
13141 {
13142         int ret = -ENOTSUP;
13143         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
13144         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
13145         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13146         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
13147
13148         if (pf->support_multi_driver) {
13149                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
13150                 return ret;
13151         }
13152
13153         /* Init */
13154         memset(&filter_replace, 0,
13155                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13156         memset(&filter_replace_buf, 0,
13157                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13158
13159         /* create L1 filter */
13160         filter_replace.old_filter_type =
13161                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
13162         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13163         filter_replace.tr_bit = 0;
13164
13165         /* Prepare the buffer, 2 entries */
13166         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
13167         filter_replace_buf.data[0] |=
13168                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13169         /* Field Vector 12b mask */
13170         filter_replace_buf.data[2] = 0xff;
13171         filter_replace_buf.data[3] = 0x0f;
13172         filter_replace_buf.data[4] =
13173                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
13174         filter_replace_buf.data[4] |=
13175                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13176         /* Field Vector 12b mask */
13177         filter_replace_buf.data[6] = 0xff;
13178         filter_replace_buf.data[7] = 0x0f;
13179         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13180                         &filter_replace_buf);
13181         if (ret != I40E_SUCCESS)
13182                 return ret;
13183
13184         if (filter_replace.old_filter_type !=
13185             filter_replace.new_filter_type)
13186                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
13187                             " original: 0x%x, new: 0x%x",
13188                             dev->device->name,
13189                             filter_replace.old_filter_type,
13190                             filter_replace.new_filter_type);
13191
13192         /* Apply the second L2 cloud filter */
13193         memset(&filter_replace, 0,
13194                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13195         memset(&filter_replace_buf, 0,
13196                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13197
13198         /* create L2 filter, input for L2 filter will be L1 filter  */
13199         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
13200         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
13201         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13202
13203         /* Prepare the buffer, 2 entries */
13204         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
13205         filter_replace_buf.data[0] |=
13206                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13207         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13208         filter_replace_buf.data[4] |=
13209                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13210         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13211                         &filter_replace_buf);
13212         if (!ret && (filter_replace.old_filter_type !=
13213                      filter_replace.new_filter_type))
13214                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
13215                             " original: 0x%x, new: 0x%x",
13216                             dev->device->name,
13217                             filter_replace.old_filter_type,
13218                             filter_replace.new_filter_type);
13219
13220         return ret;
13221 }
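
/* Resulting replace-buffer bytes for step 1 above, derived from the header
 * comment (0x80 is the "valid" flag, 0x0fff the 12-bit field mask):
 *
 *     data[0..3] = { 0x80 | 8,  0x00, 0xff, 0x0f }   outer vlan FV entry
 *     data[4..7] = { 0x80 | 37, 0x00, 0xff, 0x0f }   inner vlan FV entry
 */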
13222
13223 int
13224 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
13225                    const struct rte_flow_action_rss *in)
13226 {
13227         if (in->key_len > RTE_DIM(out->key) ||
13228             in->queue_num > RTE_DIM(out->queue))
13229                 return -EINVAL;
13230         if (!in->key && in->key_len)
13231                 return -EINVAL;
13232         out->conf = (struct rte_flow_action_rss){
13233                 .func = in->func,
13234                 .level = in->level,
13235                 .types = in->types,
13236                 .key_len = in->key_len,
13237                 .queue_num = in->queue_num,
13238                 .queue = memcpy(out->queue, in->queue,
13239                                 sizeof(*in->queue) * in->queue_num),
13240         };
13241         if (in->key)
13242                 out->conf.key = memcpy(out->key, in->key, in->key_len);
13243         return 0;
13244 }
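
/* Usage sketch (hypothetical caller): duplicating a flow RSS action into
 * driver-owned storage, so the rule's own buffers need not outlive the
 * call. Queue and type values are illustrative only.
 *
 *     struct i40e_rte_flow_rss_conf stored = { 0 };
 *     uint16_t queues[2] = { 0, 1 };
 *     struct rte_flow_action_rss in = {
 *             .types = ETH_RSS_NONFRAG_IPV4_TCP,
 *             .queue_num = 2,
 *             .queue = queues,
 *     };
 *     int rc = i40e_rss_conf_init(&stored, &in);
 *
 * On success (rc == 0), stored.conf.queue points at stored.queue and the
 * caller's buffers may be released.
 */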
13245
13246 /* Write HENA register to enable hash */
13247 static int
13248 i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
13249 {
13250         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13251         uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
13252         uint64_t hena;
13253         int ret;
13254
13255         ret = i40e_set_rss_key(pf->main_vsi, key,
13256                                rss_conf->conf.key_len);
13257         if (ret)
13258                 return ret;
13259
13260         hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
13261         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
13262         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
13263         I40E_WRITE_FLUSH(hw);
13264
13265         return 0;
13266 }
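
/* Note: HENA is a 64-bit enable bitmap with one bit per hardware pctype;
 * it spans two 32-bit registers, I40E_PFQF_HENA(0) holding the low word
 * and I40E_PFQF_HENA(1) the high word, hence the split write above.
 */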
13267
13268 /* Configure hash input set */
13269 static int
13270 i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
13271 {
13272         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13273         struct rte_eth_input_set_conf conf;
13274         uint64_t mask0;
13275         int ret = 0;
13276         uint32_t j;
13277         int i;
13278         static const struct {
13279                 uint64_t type;
13280                 enum rte_eth_input_set_field field;
13281         } inset_match_table[] = {
13282                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
13283                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13284                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
13285                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13286                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
13287                         RTE_ETH_INPUT_SET_UNKNOWN},
13288                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
13289                         RTE_ETH_INPUT_SET_UNKNOWN},
13290
13291                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
13292                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13293                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
13294                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13295                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
13296                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13297                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
13298                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13299
13300                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
13301                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13302                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
13303                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13304                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
13305                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13306                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
13307                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13308
13309                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
13310                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13311                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
13312                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13313                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
13314                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13315                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
13316                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13317
13318                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
13319                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13320                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
13321                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13322                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
13323                         RTE_ETH_INPUT_SET_UNKNOWN},
13324                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
13325                         RTE_ETH_INPUT_SET_UNKNOWN},
13326
13327                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
13328                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13329                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
13330                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13331                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
13332                         RTE_ETH_INPUT_SET_UNKNOWN},
13333                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
13334                         RTE_ETH_INPUT_SET_UNKNOWN},
13335
13336                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
13337                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13338                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
13339                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13340                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
13341                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13342                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
13343                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13344
13345                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
13346                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13347                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
13348                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13349                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
13350                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13351                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
13352                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13353
13354                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
13355                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13356                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
13357                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13358                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
13359                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13360                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
13361                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13362
13363                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
13364                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13365                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
13366                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13367                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
13368                         RTE_ETH_INPUT_SET_UNKNOWN},
13369                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
13370                         RTE_ETH_INPUT_SET_UNKNOWN},
13371         };
13372
13373         mask0 = types & pf->adapter->flow_types_mask;
13374         conf.op = RTE_ETH_INPUT_SET_SELECT;
13375         conf.inset_size = 0;
13376         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
13377                 if (mask0 & (1ULL << i)) {
13378                         conf.flow_type = i;
13379                         break;
13380                 }
13381         }
13382
13383         for (j = 0; j < RTE_DIM(inset_match_table); j++) {
13384                 if ((types & inset_match_table[j].type) ==
13385                     inset_match_table[j].type) {
13386                         if (inset_match_table[j].field ==
13387                             RTE_ETH_INPUT_SET_UNKNOWN)
13388                                 return -EINVAL;
13389
13390                         conf.field[conf.inset_size] =
13391                                 inset_match_table[j].field;
13392                         conf.inset_size++;
13393                 }
13394         }
13395
13396         if (conf.inset_size) {
13397                 ret = i40e_hash_filter_inset_select(hw, &conf);
13398                 if (ret)
13399                         return ret;
13400         }
13401
13402         return ret;
13403 }
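
/* Worked example: types = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY
 * matches exactly one row of inset_match_table, so the selected input set
 * collapses to a single field,
 *
 *     conf.field[0]   = RTE_ETH_INPUT_SET_L3_SRC_IP4;
 *     conf.inset_size = 1;
 *
 * i.e. the hash for IPv4/TCP packets covers the source address only.
 */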
13404
13405 /* Look up the conflicted rule then mark it as invalid */
13406 static void
13407 i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
13408                 struct i40e_rte_flow_rss_conf *conf)
13409 {
13410         struct i40e_rss_filter *rss_item;
13411         uint64_t rss_inset;
13412
13413         /* Clear input set bits before comparing the pctype */
13414         rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
13415                 ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
13416
13417         /* Look up the conflicted rule then mark it as invalid */
13418         TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
13419                 if (!rss_item->rss_filter_info.valid)
13420                         continue;
13421
13422                 if (conf->conf.queue_num &&
13423                     rss_item->rss_filter_info.conf.queue_num)
13424                         rss_item->rss_filter_info.valid = false;
13425
13426                 if (conf->conf.types &&
13427                     (rss_item->rss_filter_info.conf.types &
13428                     rss_inset) ==
13429                     (conf->conf.types & rss_inset))
13430                         rss_item->rss_filter_info.valid = false;
13431
13432                 if (conf->conf.func ==
13433                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
13434                     rss_item->rss_filter_info.conf.func ==
13435                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13436                         rss_item->rss_filter_info.valid = false;
13437         }
13438 }
13439
13440 /* Configure RSS hash function */
13441 static int
13442 i40e_rss_config_hash_function(struct i40e_pf *pf,
13443                 struct i40e_rte_flow_rss_conf *conf)
13444 {
13445         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13446         uint32_t reg, i;
13447         uint64_t mask0;
13448         uint16_t j;
13449
13450         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13451                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13452                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
13453                         PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
13454                         I40E_WRITE_FLUSH(hw);
13455                         i40e_rss_mark_invalid_rule(pf, conf);
13456
13457                         return 0;
13458                 }
13459                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
13460
13461                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13462                 I40E_WRITE_FLUSH(hw);
13463                 i40e_rss_mark_invalid_rule(pf, conf);
13464         } else if (conf->conf.func ==
13465                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13466                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13467
13468                 i40e_set_symmetric_hash_enable_per_port(hw, 1);
13469                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13470                         if (mask0 & (1ULL << i))
13471                                 break;
13472                 }
13473
13474                 if (i == UINT64_BIT)
13475                         return -EINVAL;
13476
13477                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13478                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13479                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13480                                 i40e_write_global_rx_ctl(hw,
13481                                         I40E_GLQF_HSYM(j),
13482                                         I40E_GLQF_HSYM_SYMH_ENA_MASK);
13483                 }
13484         }
13485
13486         return 0;
13487 }
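
/* Note: I40E_GLQF_CTL is a global (device-wide) register; clearing its
 * HTOEP bit switches every function of the NIC to simple XOR hashing,
 * which is why the current value is read back and checked first.
 */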
13488
13489 /* Enable RSS according to the configuration */
13490 static int
13491 i40e_rss_enable_hash(struct i40e_pf *pf,
13492                 struct i40e_rte_flow_rss_conf *conf)
13493 {
13494         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13495         struct i40e_rte_flow_rss_conf rss_conf;
13496
13497         if (!(conf->conf.types & pf->adapter->flow_types_mask))
13498                 return -ENOTSUP;
13499
13500         memset(&rss_conf, 0, sizeof(rss_conf));
13501         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13502
13503         /* Configure hash input set */
13504         if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
13505                 return -EINVAL;
13506
13507         if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
13508             (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
13509                 /* Random default keys */
13510                 static uint32_t rss_key_default[] = {0x6b793944,
13511                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
13512                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
13513                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
13514
13515                 rss_conf.conf.key = (uint8_t *)rss_key_default;
13516                 rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
13517                                 sizeof(uint32_t);
13518                 PMD_DRV_LOG(INFO,
13519                         "No valid RSS key config for i40e, using default\n");
13520         }
13521
13522         rss_conf.conf.types |= rss_info->conf.types;
13523         i40e_rss_hash_set(pf, &rss_conf);
13524
13525         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
13526                 i40e_rss_config_hash_function(pf, conf);
13527
13528         i40e_rss_mark_invalid_rule(pf, conf);
13529
13530         return 0;
13531 }
13532
13533 /* Configure RSS queue region */
13534 static int
13535 i40e_rss_config_queue_region(struct i40e_pf *pf,
13536                 struct i40e_rte_flow_rss_conf *conf)
13537 {
13538         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13539         uint32_t lut = 0;
13540         uint16_t j, num;
13541         uint32_t i;
13542
13543         /* If both VMDQ and RSS are enabled, not all of the PF queues are configured.
13544          * It's necessary to calculate the actual number of PF queues in use.
13545          */
13546         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13547                 num = i40e_pf_calc_configured_queues_num(pf);
13548         else
13549                 num = pf->dev_data->nb_rx_queues;
13550
13551         num = RTE_MIN(num, conf->conf.queue_num);
13552         PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
13553                         num);
13554
13555         if (num == 0) {
13556                 PMD_DRV_LOG(ERR,
13557                         "No PF queues are configured to enable RSS for port %u",
13558                         pf->dev_data->port_id);
13559                 return -ENOTSUP;
13560         }
13561
13562         /* Fill in redirection table */
13563         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13564                 if (j == num)
13565                         j = 0;
13566                 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
13567                         hw->func_caps.rss_table_entry_width) - 1));
13568                 if ((i & 3) == 3)
13569                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13570         }
13571
13572         i40e_rss_mark_invalid_rule(pf, conf);
13573
13574         return 0;
13575 }
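
/* Worked example of the LUT packing above (hypothetical queues): with
 * conf->conf.queue = { 3, 4 } and num = 2, every group of four iterations
 * packs four 8-bit entries MSB-first, so each I40E_PFQF_HLUT register is
 * written as 0x03040304, alternating flows between queues 3 and 4.
 */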
13576
13577 /* Configure RSS hash function to default */
13578 static int
13579 i40e_rss_clear_hash_function(struct i40e_pf *pf,
13580                 struct i40e_rte_flow_rss_conf *conf)
13581 {
13582         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13583         uint32_t i, reg;
13584         uint64_t mask0;
13585         uint16_t j;
13586
13587         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13588                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13589                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
13590                         PMD_DRV_LOG(DEBUG,
13591                                 "Hash function already set to Toeplitz");
13592                         I40E_WRITE_FLUSH(hw);
13593
13594                         return 0;
13595                 }
13596                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
13597
13598                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13599                 I40E_WRITE_FLUSH(hw);
13600         } else if (conf->conf.func ==
13601                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13602                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13603
13604                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13605                         if (mask0 & (1ULL << i))
13606                                 break;
13607                 }
13608
13609                 if (i == UINT64_BIT)
13610                         return -EINVAL;
13611
13612                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13613                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13614                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13615                                 i40e_write_global_rx_ctl(hw,
13616                                         I40E_GLQF_HSYM(j),
13617                                         0);
13618                 }
13619         }
13620
13621         return 0;
13622 }
13623
13624 /* Disable RSS hash and configure default input set */
13625 static int
13626 i40e_rss_disable_hash(struct i40e_pf *pf,
13627                 struct i40e_rte_flow_rss_conf *conf)
13628 {
13629         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13630         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13631         struct i40e_rte_flow_rss_conf rss_conf;
13632         uint32_t i;
13633
13634         memset(&rss_conf, 0, sizeof(rss_conf));
13635         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13636
13637         /* Disable RSS hash */
13638         rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
13639         i40e_rss_hash_set(pf, &rss_conf);
13640
13641         for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
13642                 if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
13643                     !(conf->conf.types & (1ULL << i)))
13644                         continue;
13645
13646                 /* Configure default input set */
13647                 struct rte_eth_input_set_conf input_conf = {
13648                         .op = RTE_ETH_INPUT_SET_SELECT,
13649                         .flow_type = i,
13650                         .inset_size = 1,
13651                 };
13652                 input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
13653                 i40e_hash_filter_inset_select(hw, &input_conf);
13654         }
13655
13656         rss_info->conf.types = rss_conf.conf.types;
13657
13658         i40e_rss_clear_hash_function(pf, conf);
13659
13660         return 0;
13661 }
13662
13663 /* Configure RSS queue region to default */
13664 static int
13665 i40e_rss_clear_queue_region(struct i40e_pf *pf)
13666 {
13667         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13668         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13669         uint16_t queue[I40E_MAX_Q_PER_TC];
13670         uint32_t num_rxq, i;
13671         uint32_t lut = 0;
13672         uint16_t j, num;
13673
13674         num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
13675
13676         for (j = 0; j < num_rxq; j++)
13677                 queue[j] = j;
13678
13679         /* If both VMDQ and RSS are enabled, not all of the PF queues are configured.
13680          * It's necessary to calculate the actual number of PF queues in use.
13681          */
13682         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13683                 num = i40e_pf_calc_configured_queues_num(pf);
13684         else
13685                 num = pf->dev_data->nb_rx_queues;
13686
13687         num = RTE_MIN(num, num_rxq);
13688         PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
13689                         num);
13690
13691         if (num == 0) {
13692                 PMD_DRV_LOG(ERR,
13693                         "No PF queues are configured to enable RSS for port %u",
13694                         pf->dev_data->port_id);
13695                 return -ENOTSUP;
13696         }
13697
13698         /* Fill in redirection table */
13699         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13700                 if (j == num)
13701                         j = 0;
13702                 lut = (lut << 8) | (queue[j] & ((0x1 <<
13703                         hw->func_caps.rss_table_entry_width) - 1));
13704                 if ((i & 3) == 3)
13705                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13706         }
13707
13708         rss_info->conf.queue_num = 0;
13709         rss_info->conf.queue = NULL;
13710
13711         return 0;
13712 }
13713
13714 int
13715 i40e_config_rss_filter(struct i40e_pf *pf,
13716                 struct i40e_rte_flow_rss_conf *conf, bool add)
13717 {
13718         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13719         struct rte_flow_action_rss update_conf = rss_info->conf;
13720         int ret = 0;
13721
13722         if (add) {
13723                 if (conf->conf.queue_num) {
13724                         /* Configure RSS queue region */
13725                         ret = i40e_rss_config_queue_region(pf, conf);
13726                         if (ret)
13727                                 return ret;
13728
13729                         update_conf.queue_num = conf->conf.queue_num;
13730                         update_conf.queue = conf->conf.queue;
13731                 } else if (conf->conf.func ==
13732                            RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13733                         /* Configure hash function */
13734                         ret = i40e_rss_config_hash_function(pf, conf);
13735                         if (ret)
13736                                 return ret;
13737
13738                         update_conf.func = conf->conf.func;
13739                 } else {
13740                         /* Configure hash enable and input set */
13741                         ret = i40e_rss_enable_hash(pf, conf);
13742                         if (ret)
13743                                 return ret;
13744
13745                         update_conf.types |= conf->conf.types;
13746                         update_conf.key = conf->conf.key;
13747                         update_conf.key_len = conf->conf.key_len;
13748                 }
13749
13750                 /* Update RSS info in pf */
13751                 if (i40e_rss_conf_init(rss_info, &update_conf))
13752                         return -EINVAL;
13753         } else {
13754                 if (!conf->valid)
13755                         return 0;
13756
13757                 if (conf->conf.queue_num)
13758                         i40e_rss_clear_queue_region(pf);
13759                 else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13760                         i40e_rss_clear_hash_function(pf, conf);
13761                 else
13762                         i40e_rss_disable_hash(pf, conf);
13763         }
13764
13765         return 0;
13766 }
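
/* Illustrative testpmd flow commands that exercise this path on port 0
 * (syntax per testpmd's flow command; queue and type values are examples):
 * the first rule enables hashing on IPv4/TCP, the second builds a queue
 * region over queues 0-3.
 *
 *     flow create 0 ingress pattern end
 *             actions rss types ipv4-tcp end queues end / end
 *     flow create 0 ingress pattern end
 *             actions rss queues 0 1 2 3 end / end
 */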
13767
13768 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
13769 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
13770 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
13771 RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
13772 #endif
13773 #ifdef RTE_LIBRTE_I40E_DEBUG_TX
13774 RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
13775 #endif
13776 #ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
13777 RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG);
13778 #endif
13779
13780 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
13781                               ETH_I40E_FLOATING_VEB_ARG "=1"
13782                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
13783                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
13784                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
13785                               ETH_I40E_USE_LATEST_VEC "=0|1");