/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_bitmap.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
#define ETH_I40E_USE_LATEST_VEC "use-latest-supported-vec"
#define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"

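/*
 * Illustrative usage of the devargs above (not part of the driver):
 * they are passed per device on the EAL command line and parsed
 * against valid_keys[] further down in this file. A sketch, assuming
 * a PF at PCI address 0000:02:00.0 (address and values are
 * placeholders) and a DPDK build of this era where -w selects a
 * device:
 *
 *   dpdk-testpmd -w 0000:02:00.0,queue-num-per-vf=4,support-multi-driver=1 -- -i
 *   dpdk-testpmd -w '0000:02:00.0,enable_floating_veb=1,floating_veb_list=0;3-5' -- -i
 */
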
#define I40E_CLEAR_PXE_WAIT_MS     200
#define I40E_VSI_TSR_QINQ_STRIP         0x4010
#define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
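/*
 * Both defaults resolve to the full Rx packet buffer expressed in
 * kilobytes: 0xF2000 = 991232 bytes, and 991232 >> I40E_KILOSHIFT (10)
 * = 968 KB, which matches I40E_RXPBSIZE (968 * 1024 bytes). High and
 * low water therefore default to the same value.
 */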

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
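/*
 * The increment values above scale inversely with link speed: the
 * 10 GbE value is twice the 40 GbE value (0x0199999999 * 2 =
 * 0x0333333332, rounded up to 0x0333333333), and the 1 GbE value is
 * ten times the 10 GbE value (0x0333333333 * 10 = 0x1FFFFFFFFE,
 * rounded up to 0x2000000000), presumably so the timesync counter
 * advances at the same wall-clock rate at every speed.
 */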

/**
 * The values below, suggested by silicon experts, are for writing
 * registers that are not exposed through the register map.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

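/*
 * A minimal sketch of how these offsets are typically used with the
 * rte_pci config-space accessors (illustrative only; the real logic
 * lives in the PF init path):
 *
 *   uint32_t cap, ctrl;
 *   if (rte_pci_read_config(pci_dev, &cap, sizeof(cap),
 *                           PCI_DEV_CAP_REG) == sizeof(cap) &&
 *       (cap & PCI_DEV_CAP_EXT_TAG_MASK)) {
 *           rte_pci_read_config(pci_dev, &ctrl, sizeof(ctrl),
 *                               PCI_DEV_CTRL_REG);
 *           ctrl |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *           rte_pci_write_config(pci_dev, &ctrl, sizeof(ctrl),
 *                                PCI_DEV_CTRL_REG);
 *   }
 */
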
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static int i40e_dev_close(struct rte_eth_dev *dev);
static int i40e_dev_reset(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
                                char *fw_version, size_t fw_size);
static int i40e_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
                            struct rte_ether_addr *mac_addr,
                            uint32_t index,
                            uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static void i40e_dev_alarm_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
                                                     uint16_t seid,
                                                     uint16_t rule_type,
                                                     uint16_t *entries,
                                                     uint16_t count,
                                                     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
                                struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
                                  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct rte_ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
        struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
        struct i40e_aqc_cloud_filters_element_bb *cld_filter,
        struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
static int i40e_pf_config_rss(struct i40e_pf *pf);

static const char *const valid_keys[] = {
        ETH_I40E_FLOATING_VEB_ARG,
        ETH_I40E_FLOATING_VEB_LIST_ARG,
        ETH_I40E_SUPPORT_MULTI_DRIVER,
        ETH_I40E_QUEUE_NUM_PER_VF_ARG,
        ETH_I40E_USE_LATEST_VEC,
        ETH_I40E_VF_MSG_CFG,
        NULL};

static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .dev_reset                    = i40e_dev_reset,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .fw_version_get               = i40e_fw_version_get,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .rx_burst_mode_get            = i40e_rx_burst_mode_get,
        .tx_burst_mode_get            = i40e_tx_burst_mode_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .get_module_info              = i40e_get_module_info,
        .get_module_eeprom            = i40e_get_module_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
        .tm_ops_get                   = i40e_tm_ops_get,
        .tx_done_cleanup              = i40e_tx_done_cleanup,
};

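/*
 * Applications never call these ops directly; the generic ethdev API
 * dispatches through this table. A minimal sketch (port_id is assumed
 * to refer to an i40e port):
 *
 *   struct rte_eth_conf conf = { 0 };
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);  // -> i40e_dev_configure()
 *   rte_eth_dev_start(port_id);                   // -> i40e_dev_start()
 */
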
/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))

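/*
 * These name/offset pairs back rte_eth_xstats_get()/_get_names(). A
 * minimal sketch of reading them from an application (error handling
 * omitted; port_id is assumed valid):
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);   // query count
 *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *   rte_eth_xstats_get(port_id, xs, n);             // xs[i].id / .value
 */
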
static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
        int i, retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                sizeof(struct i40e_adapter),
                eth_dev_pci_specific_init, pci_dev,
                eth_i40e_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        /* probe VF representor ports */
        struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
                pci_dev->device.name);

        if (pf_ethdev == NULL)
                return -ENODEV;

        for (i = 0; i < eth_da.nb_representor_ports; i++) {
                struct i40e_vf_representor representor = {
                        .vf_id = eth_da.representor_ports[i],
                        .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
                                pf_ethdev->data->dev_private)->switch_domain_id,
                        .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
                                pf_ethdev->data->dev_private)
                };

                /* representor port name: net_<pci bdf>_representor_<vf id> */
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                        pci_dev->device.name, eth_da.representor_ports[i]);

                retval = rte_eth_dev_create(&pci_dev->device, name,
                        sizeof(struct i40e_vf_representor), NULL, NULL,
                        i40e_vf_representor_init, &representor);

                if (retval)
                        PMD_DRV_LOG(ERR, "failed to create i40e vf "
                                "representor %s.", name);
        }

        return 0;
}

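/*
 * VF representor ports are requested through the standard "representor"
 * devarg parsed by rte_eth_devargs_parse() above. An illustrative
 * invocation (PCI address is a placeholder):
 *
 *   dpdk-testpmd -w 0000:02:00.0,representor=[0-2] -- -i
 *
 * which creates net_0000:02:00.0_representor_0 .. _2 alongside the PF
 * port, following the snprintf() name format above.
 */
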
static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return 0;

        if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                        i40e_vf_representor_uninit);
        else
                return rte_eth_dev_pci_generic_remove(pci_dev,
                                                eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
        .id_table = pci_id_i40e_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_i40e_pci_probe,
        .remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
                         uint32_t reg_val)
{
        uint32_t ori_reg_val;
        struct rte_eth_dev *dev;

        ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
        dev = ((struct i40e_adapter *)hw->back)->eth_dev;
        i40e_write_rx_ctl(hw, reg_addr, reg_val);
        if (ori_reg_val != reg_val)
                PMD_DRV_LOG(WARNING,
                            "i40e device %s changed global register [0x%08x]."
                            " original: 0x%08x, new: 0x%08x",
                            dev->device->name, reg_addr, ori_reg_val, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
        /*
         * Initialize registers for parsing packet type of QinQ
         * This should be removed from code once proper
         * configuration API is added to avoid configuration conflicts
         * between ports of the same device.
         */
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t val;

        /* INTENA flag is not auto-cleared for interrupt */
        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
        val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
                I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

        /* If multi-driver support is enabled, the PF uses INT0. */
        if (!pf->support_multi_driver)
                val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames (MAC control
 * EtherType 0x8808) transmitted from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR,
                        "Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
                          const char *floating_veb_value,
                          void *opaque)
{
        int idx = 0;
        unsigned int count = 0;
        char *end = NULL;
        int min, max;
        bool *vf_floating_veb = opaque;

        while (isblank(*floating_veb_value))
                floating_veb_value++;

        /* Reset floating VEB configuration for VFs */
        for (idx = 0; idx < I40E_MAX_VF; idx++)
                vf_floating_veb[idx] = false;

        min = I40E_MAX_VF;
        do {
                while (isblank(*floating_veb_value))
                        floating_veb_value++;
                if (*floating_veb_value == '\0')
                        return -1;
                errno = 0;
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        min = idx;
                } else if ((*end == ';') || (*end == '\0')) {
                        max = idx;
                        if (min == I40E_MAX_VF)
                                min = idx;
                        if (max >= I40E_MAX_VF)
                                max = I40E_MAX_VF - 1;
                        for (idx = min; idx <= max; idx++) {
                                vf_floating_veb[idx] = true;
                                count++;
                        }
                        min = I40E_MAX_VF;
                } else {
                        return -1;
                }
                floating_veb_value = end + 1;
        } while (*end != '\0');

        if (count == 0)
                return -1;

        return 0;
}

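/*
 * Per the parser above, the list is a ';'-separated set of VF ids and
 * '-' ranges. For example (values are placeholders):
 *
 *   floating_veb_list=0;3-5
 *
 * marks VFs 0, 3, 4 and 5 for the floating VEB and leaves the rest on
 * the legacy VEB.
 */
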
static void
config_vf_floating_veb(struct rte_devargs *devargs,
                       uint16_t floating_veb,
                       bool *vf_floating_veb)
{
        struct rte_kvargs *kvlist;
        int i;
        const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

        if (!floating_veb)
                return;
        /* All the VFs attach to the floating VEB by default
         * when the floating VEB is enabled.
         */
        for (i = 0; i < I40E_MAX_VF; i++)
                vf_floating_veb[i] = true;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, valid_keys);
        if (kvlist == NULL)
                return;

        if (!rte_kvargs_count(kvlist, floating_veb_list)) {
                rte_kvargs_free(kvlist);
                return;
        }
        /* When the floating_veb_list parameter exists, all VFs
         * attach to the legacy VEB first and are then moved to the
         * floating VEB according to the floating_veb_list.
         */
        if (rte_kvargs_process(kvlist, floating_veb_list,
                               floating_veb_list_handler,
                               vf_floating_veb) < 0) {
                rte_kvargs_free(kvlist);
                return;
        }
        rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
                            const char *value,
                            __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, valid_keys);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, floating_veb_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* Floating VEB is enabled when the key-value pair
         * enable_floating_veb=1 is present.
         */
        if (rte_kvargs_process(kvlist, floating_veb_key,
                               i40e_check_floating_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

        if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
                pf->floating_veb =
                        is_floating_veb_supported(pci_dev->device.devargs);
                config_vf_floating_veb(pci_dev->device.devargs,
                                       pf->floating_veb,
                                       pf->floating_veb_list);
        } else {
                pf->floating_veb = false;
        }
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        char ethertype_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters ethertype_hash_params = {
                .name = ethertype_hash_name,
                .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize ethertype filter rule list and hash */
        TAILQ_INIT(&ethertype_rule->ethertype_list);
        snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
                 "ethertype_%s", dev->device->name);
        ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
        if (!ethertype_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
                return -EINVAL;
        }
        ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
                                       sizeof(struct i40e_ethertype_filter *) *
                                       I40E_MAX_ETHERTYPE_FILTER_NUM,
                                       0);
        if (!ethertype_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for ethertype hash map!");
                ret = -ENOMEM;
                goto err_ethertype_hash_map_alloc;
        }

        return 0;

err_ethertype_hash_map_alloc:
        rte_hash_free(ethertype_rule->hash_table);

        return ret;
}

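/*
 * The hash table and the hash_map array work as a pair; a sketch of
 * the insert pattern used by the filter insert helpers later in this
 * file (simplified, no error handling):
 *
 *   int pos = rte_hash_add_key(ethertype_rule->hash_table,
 *                              &filter->input);
 *   if (pos >= 0)
 *           ethertype_rule->hash_map[pos] = filter;
 *
 * so a later rte_hash_lookup() on the same key yields the index of
 * the stored filter.
 */
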
static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        char tunnel_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters tunnel_hash_params = {
                .name = tunnel_hash_name,
                .entries = I40E_MAX_TUNNEL_FILTER_NUM,
                .key_len = sizeof(struct i40e_tunnel_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize tunnel filter rule list and hash */
        TAILQ_INIT(&tunnel_rule->tunnel_list);
        snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
                 "tunnel_%s", dev->device->name);
        tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
        if (!tunnel_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
                return -EINVAL;
        }
        tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
                                    sizeof(struct i40e_tunnel_filter *) *
                                    I40E_MAX_TUNNEL_FILTER_NUM,
                                    0);
        if (!tunnel_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for tunnel hash map!");
                ret = -ENOMEM;
                goto err_tunnel_hash_map_alloc;
        }

        return 0;

err_tunnel_hash_map_alloc:
        rte_hash_free(tunnel_rule->hash_table);

        return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
        uint32_t best = hw->func_caps.fd_filters_best_effort;
        struct rte_bitmap *bmp = NULL;
        uint32_t bmp_size;
        void *mem = NULL;
        uint32_t i = 0;
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = I40E_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct i40e_fdir_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize flow director filter rule list and hash */
        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }

        fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
                                          sizeof(struct i40e_fdir_filter *) *
                                          I40E_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }

        fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
                        sizeof(struct i40e_fdir_filter) *
                        I40E_MAX_FDIR_FILTER_NUM,
                        0);

        if (!fdir_info->fdir_filter_array) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir filter array!");
                ret = -ENOMEM;
                goto err_fdir_filter_array_alloc;
        }

        fdir_info->fdir_space_size = alloc + best;
        fdir_info->fdir_actual_cnt = 0;
        fdir_info->fdir_guarantee_total_space = alloc;
        fdir_info->fdir_guarantee_free_space =
                fdir_info->fdir_guarantee_total_space;

        PMD_DRV_LOG(INFO, "FDIR guaranteed space: %u, best-effort space: %u.",
                    alloc, best);

        fdir_info->fdir_flow_pool.pool =
                        rte_zmalloc("i40e_fdir_entry",
                                sizeof(struct i40e_fdir_entry) *
                                fdir_info->fdir_space_size,
                                0);

        if (!fdir_info->fdir_flow_pool.pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for the fdir flow pool!");
                ret = -ENOMEM;
                goto err_fdir_bitmap_flow_alloc;
        }

        for (i = 0; i < fdir_info->fdir_space_size; i++)
                fdir_info->fdir_flow_pool.pool[i].idx = i;

        bmp_size =
                rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
        mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
        if (mem == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir bitmap!");
                ret = -ENOMEM;
                goto err_fdir_mem_alloc;
        }
        bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
        if (bmp == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to initialize the fdir bitmap!");
                ret = -ENOMEM;
                goto err_fdir_bmp_alloc;
        }
        for (i = 0; i < fdir_info->fdir_space_size; i++)
                rte_bitmap_set(bmp, i);

        fdir_info->fdir_flow_pool.bitmap = bmp;

        return 0;

err_fdir_bmp_alloc:
        rte_free(mem);
err_fdir_mem_alloc:
        rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
        rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
        rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

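/*
 * The bitmap tracks which entries of fdir_flow_pool are free (bit set
 * = free). A sketch of how an entry is taken from the pool, simplified
 * from the allocation helper (no error handling):
 *
 *   uint32_t pos = 0;
 *   uint64_t slab = 0;
 *   if (rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos, &slab)) {
 *           pos += __builtin_ctzll(slab);   // first set bit in the slab
 *           rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
 *           // fdir_flow_pool.pool[pos] is now in use
 *   }
 */
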
1161 static void
1162 i40e_init_customized_info(struct i40e_pf *pf)
1163 {
1164         int i;
1165
1166         /* Initialize customized pctype */
1167         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1168                 pf->customized_pctype[i].index = i;
1169                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1170                 pf->customized_pctype[i].valid = false;
1171         }
1172
1173         pf->gtp_support = false;
1174         pf->esp_support = false;
1175 }
1176
1177 static void
1178 i40e_init_filter_invalidation(struct i40e_pf *pf)
1179 {
1180         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1181         struct i40e_fdir_info *fdir_info = &pf->fdir;
1182         uint32_t glqf_ctl_reg = 0;
1183
1184         glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1185         if (!pf->support_multi_driver) {
1186                 fdir_info->fdir_invalprio = 1;
1187                 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1188                 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1189                 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1190         } else {
1191                 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1192                         fdir_info->fdir_invalprio = 1;
1193                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1194                 } else {
1195                         fdir_info->fdir_invalprio = 0;
1196                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1197                 }
1198         }
1199 }
1200
1201 void
1202 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1203 {
1204         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1206         struct i40e_queue_regions *info = &pf->queue_region;
1207         uint16_t i;
1208
1209         for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1210                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1211
1212         memset(info, 0, sizeof(struct i40e_queue_regions));
1213 }
1214
1215 static int
1216 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1217                                const char *value,
1218                                void *opaque)
1219 {
1220         struct i40e_pf *pf;
1221         unsigned long support_multi_driver;
1222         char *end;
1223
1224         pf = (struct i40e_pf *)opaque;
1225
1226         errno = 0;
1227         support_multi_driver = strtoul(value, &end, 10);
1228         if (errno != 0 || end == value || *end != 0) {
1229                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1230                 return -(EINVAL);
1231         }
1232
1233         if (support_multi_driver == 1 || support_multi_driver == 0)
1234                 pf->support_multi_driver = (bool)support_multi_driver;
1235         else
1236                 PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
1237                             "enabling global configuration by default.",
1238                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1239         return 0;
1240 }
1241
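     /*
      * "support-multi-driver" is passed as a devarg, e.g. (illustrative
      * command line only; the PCI address is made up):
      *
      *   testpmd -w 0000:02:00.0,support-multi-driver=1 -- -i
      *
      * When enabled, this PMD avoids reprogramming global registers that
      * are shared with other drivers bound to the same NIC.
      */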
1242 static int
1243 i40e_support_multi_driver(struct rte_eth_dev *dev)
1244 {
1245         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1246         struct rte_kvargs *kvlist;
1247         int kvargs_count;
1248
1249         /* Enable global configuration by default */
1250         pf->support_multi_driver = false;
1251
1252         if (!dev->device->devargs)
1253                 return 0;
1254
1255         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1256         if (!kvlist)
1257                 return -EINVAL;
1258
1259         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1260         if (!kvargs_count) {
1261                 rte_kvargs_free(kvlist);
1262                 return 0;
1263         }
1264
1265         if (kvargs_count > 1)
1266                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
1267                             "the first invalid or last valid one is used!",
1268                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1269
1270         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1271                                i40e_parse_multi_drv_handler, pf) < 0) {
1272                 rte_kvargs_free(kvlist);
1273                 return -EINVAL;
1274         }
1275
1276         rte_kvargs_free(kvlist);
1277         return 0;
1278 }
1279
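     /*
      * Global registers are shared by every driver bound to the device,
      * so wrap the debug AQ write with a read and log the old and new
      * values whenever the write actually changes the register.
      */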
1280 static int
1281 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1282                                     uint32_t reg_addr, uint64_t reg_val,
1283                                     struct i40e_asq_cmd_details *cmd_details)
1284 {
1285         uint64_t ori_reg_val;
1286         struct rte_eth_dev *dev;
1287         int ret;
1288
1289         ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1290         if (ret != I40E_SUCCESS) {
1291                 PMD_DRV_LOG(ERR,
1292                             "Fail to debug read from 0x%08x",
1293                             reg_addr);
1294                 return -EIO;
1295         }
1296         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1297
1298         if (ori_reg_val != reg_val)
1299                 PMD_DRV_LOG(WARNING,
1300                             "i40e device %s changed global register [0x%08x]."
1301                             " original: 0x%"PRIx64", after: 0x%"PRIx64,
1302                             dev->device->name, reg_addr, ori_reg_val, reg_val);
1303
1304         return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1305 }
1306
1307 static int
1308 i40e_parse_latest_vec_handler(__rte_unused const char *key,
1309                                 const char *value,
1310                                 void *opaque)
1311 {
1312         struct i40e_adapter *ad = opaque;
1313         int use_latest_vec;
1314
1315         use_latest_vec = atoi(value);
1316
1317         if (use_latest_vec != 0 && use_latest_vec != 1)
1318                 PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
1319
1320         ad->use_latest_vec = (uint8_t)!!use_latest_vec; /* clamp to 0/1 as warned above */
1321
1322         return 0;
1323 }
1324
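     /*
      * "use-latest-supported-vec" is likewise a devarg, e.g.
      * (illustrative command line only):
      *
      *   testpmd -w 0000:02:00.0,use-latest-supported-vec=1 -- -i
      *
      * It asks the PMD to prefer the newest vector Rx/Tx path the CPU
      * supports over the default conservative choice.
      */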
1325 static int
1326 i40e_use_latest_vec(struct rte_eth_dev *dev)
1327 {
1328         struct i40e_adapter *ad =
1329                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1330         struct rte_kvargs *kvlist;
1331         int kvargs_count;
1332
1333         ad->use_latest_vec = false;
1334
1335         if (!dev->device->devargs)
1336                 return 0;
1337
1338         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1339         if (!kvlist)
1340                 return -EINVAL;
1341
1342         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
1343         if (!kvargs_count) {
1344                 rte_kvargs_free(kvlist);
1345                 return 0;
1346         }
1347
1348         if (kvargs_count > 1)
1349                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
1350                             "the first invalid or last valid one is used!",
1351                             ETH_I40E_USE_LATEST_VEC);
1352
1353         if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
1354                                 i40e_parse_latest_vec_handler, ad) < 0) {
1355                 rte_kvargs_free(kvlist);
1356                 return -EINVAL;
1357         }
1358
1359         rte_kvargs_free(kvlist);
1360         return 0;
1361 }
1362
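     /*
      * The "vf_msg_cfg" devarg is "<max_msg>@<period>:<ignore_second>".
      * For example (the numbers are only an illustration),
      * vf_msg_cfg=80@120:180 allows a VF at most 80 messages in any
      * 120 second window and ignores an offending VF for 180 seconds;
      * judging by the check below, max_msg == 0 leaves validation
      * disabled.
      */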
1363 static int
1364 read_vf_msg_config(__rte_unused const char *key,
1365                                const char *value,
1366                                void *opaque)
1367 {
1368         struct i40e_vf_msg_cfg *cfg = opaque;
1369
1370         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1371                         &cfg->ignore_second) != 3) {
1372                 memset(cfg, 0, sizeof(*cfg));
1373                 PMD_DRV_LOG(ERR, "format error! example: "
1374                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1375                 return -EINVAL;
1376         }
1377
1378         /*
1379          * If the message validation function has been enabled, 'period'
1380          * and 'ignore_second' must be greater than 0.
1381          */
1382         if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1383                 memset(cfg, 0, sizeof(*cfg));
1384                 PMD_DRV_LOG(ERR, "%s error! the second and third"
1385                                 " numbers must be greater than 0!",
1386                                 ETH_I40E_VF_MSG_CFG);
1387                 return -EINVAL;
1388         }
1389
1390         return 0;
1391 }
1392
1393 static int
1394 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1395                 struct i40e_vf_msg_cfg *msg_cfg)
1396 {
1397         struct rte_kvargs *kvlist;
1398         int kvargs_count;
1399         int ret = 0;
1400
1401         memset(msg_cfg, 0, sizeof(*msg_cfg));
1402
1403         if (!dev->device->devargs)
1404                 return ret;
1405
1406         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1407         if (!kvlist)
1408                 return -EINVAL;
1409
1410         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1411         if (!kvargs_count)
1412                 goto free_end;
1413
1414         if (kvargs_count > 1) {
1415                 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1416                                 ETH_I40E_VF_MSG_CFG);
1417                 ret = -EINVAL;
1418                 goto free_end;
1419         }
1420
1421         if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1422                         read_vf_msg_config, msg_cfg) < 0)
1423                 ret = -EINVAL;
1424
1425 free_end:
1426         rte_kvargs_free(kvlist);
1427         return ret;
1428 }
1429
1430 #define I40E_ALARM_INTERVAL 50000 /* us */
1431
1432 static int
1433 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1434 {
1435         struct rte_pci_device *pci_dev;
1436         struct rte_intr_handle *intr_handle;
1437         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1439         struct i40e_vsi *vsi;
1440         int ret;
1441         uint32_t len, val;
1442         uint8_t aq_fail = 0;
1443
1444         PMD_INIT_FUNC_TRACE();
1445
1446         dev->dev_ops = &i40e_eth_dev_ops;
1447         dev->rx_queue_count = i40e_dev_rx_queue_count;
1448         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1449         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1450         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1451         dev->rx_pkt_burst = i40e_recv_pkts;
1452         dev->tx_pkt_burst = i40e_xmit_pkts;
1453         dev->tx_pkt_prepare = i40e_prep_pkts;
1454
1455         /* For secondary processes, we don't initialise any further as the
1456          * primary has already done this work. Only check that we don't need
1457          * a different RX/TX function. */
1458         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1459                 i40e_set_rx_function(dev);
1460                 i40e_set_tx_function(dev);
1461                 return 0;
1462         }
1463         i40e_set_default_ptype_table(dev);
1464         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1465         intr_handle = &pci_dev->intr_handle;
1466
1467         rte_eth_copy_pci_info(dev, pci_dev);
1468
1469         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1470         pf->adapter->eth_dev = dev;
1471         pf->dev_data = dev->data;
1472
1473         hw->back = I40E_PF_TO_ADAPTER(pf);
1474         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1475         if (!hw->hw_addr) {
1476                 PMD_INIT_LOG(ERR,
1477                         "Hardware is not available, as address is NULL");
1478                 return -ENODEV;
1479         }
1480
1481         hw->vendor_id = pci_dev->id.vendor_id;
1482         hw->device_id = pci_dev->id.device_id;
1483         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1484         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1485         hw->bus.device = pci_dev->addr.devid;
1486         hw->bus.func = pci_dev->addr.function;
1487         hw->adapter_stopped = 0;
1488         hw->adapter_closed = 0;
1489
1490         /* Init switch device pointer */
1491         hw->switch_dev = NULL;
1492
1493         /*
1494          * Switch Tag value should not be identical to either the First Tag
1495          * or Second Tag values. So set something other than common Ethertype
1496          * for internal switching.
1497          */
1498         hw->switch_tag = 0xffff;
1499
1500         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1501         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1502                 PMD_INIT_LOG(ERR,
1503                         "Firmware recovery mode detected. Limiting functionality. "
1504                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1505                         "User Guide for details on firmware recovery mode.");
1506                 return -EIO;
1507         }
1508
1509         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1510         /* Check whether multi-driver support is needed */
1511         i40e_support_multi_driver(dev);
1512         /* Check if users want the latest supported vec path */
1513         i40e_use_latest_vec(dev);
1514
1515         /* Make sure all is clean before doing PF reset */
1516         i40e_clear_hw(hw);
1517
1518         /* Reset here to make sure all is clean for each PF */
1519         ret = i40e_pf_reset(hw);
1520         if (ret) {
1521                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1522                 return ret;
1523         }
1524
1525         /* Initialize the shared code (base driver) */
1526         ret = i40e_init_shared_code(hw);
1527         if (ret) {
1528                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1529                 return ret;
1530         }
1531
1532         /* Initialize the parameters for adminq */
1533         i40e_init_adminq_parameter(hw);
1534         ret = i40e_init_adminq(hw);
1535         if (ret != I40E_SUCCESS) {
1536                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1537                 return -EIO;
1538         }
1539         /* Firmware of SFP x722 does not support 802.1ad frames ability */
1540         if (hw->device_id == I40E_DEV_ID_SFP_X722)
1541                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1542
1543         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1544                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1545                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1546                      ((hw->nvm.version >> 12) & 0xf),
1547                      ((hw->nvm.version >> 4) & 0xff),
1548                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1549
1550         /* Initialize the hardware */
1551         i40e_hw_init(dev);
1552
1553         i40e_config_automask(pf);
1554
1555         i40e_set_default_pctype_table(dev);
1556
1557         /*
1558          * To work around an NVM issue, initialize the registers
1559          * for the QinQ packet type in software.
1560          * This should be removed once the issue is fixed in NVM.
1561          */
1562         if (!pf->support_multi_driver)
1563                 i40e_GLQF_reg_init(hw);
1564
1565         /* Initialize the input set for filters (hash and fd) to default value */
1566         i40e_filter_input_set_init(pf);
1567
1568         /* initialise the L3_MAP register */
1569         if (!pf->support_multi_driver) {
1570                 ret = i40e_aq_debug_write_global_register(hw,
1571                                                    I40E_GLQF_L3_MAP(40),
1572                                                    0x00000028,  NULL);
1573                 if (ret)
1574                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1575                                      ret);
1576                 PMD_INIT_LOG(DEBUG,
1577                              "Global register 0x%08x is changed to 0x28",
1578                              I40E_GLQF_L3_MAP(40));
1579         }
1580
1581         /* Need the special FW version to support floating VEB */
1582         config_floating_veb(dev);
1583         /* Clear PXE mode */
1584         i40e_clear_pxe_mode(hw);
1585         i40e_dev_sync_phy_type(hw);
1586
1587         /*
1588          * On X710, performance numbers fall far short of expectations on
1589          * recent firmware versions, and the fix may not be integrated in
1590          * the next firmware release, so a workaround in the software
1591          * driver is needed. It modifies the initial values of 3 internal
1592          * only registers. Note that the workaround can be removed once
1593          * this is fixed in firmware.
1594          */
1595         i40e_configure_registers(hw);
1596
1597         /* Get hw capabilities */
1598         ret = i40e_get_cap(hw);
1599         if (ret != I40E_SUCCESS) {
1600                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1601                 goto err_get_capabilities;
1602         }
1603
1604         /* Initialize parameters for PF */
1605         ret = i40e_pf_parameter_init(dev);
1606         if (ret != 0) {
1607                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1608                 goto err_parameter_init;
1609         }
1610
1611         /* Initialize the queue management */
1612         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1613         if (ret < 0) {
1614                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1615                 goto err_qp_pool_init;
1616         }
1617         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1618                                 hw->func_caps.num_msix_vectors - 1);
1619         if (ret < 0) {
1620                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1621                 goto err_msix_pool_init;
1622         }
1623
1624         /* Initialize lan hmc */
1625         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1626                                 hw->func_caps.num_rx_qp, 0, 0);
1627         if (ret != I40E_SUCCESS) {
1628                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1629                 goto err_init_lan_hmc;
1630         }
1631
1632         /* Configure lan hmc */
1633         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1634         if (ret != I40E_SUCCESS) {
1635                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1636                 goto err_configure_lan_hmc;
1637         }
1638
1639         /* Get and check the mac address */
1640         i40e_get_mac_addr(hw, hw->mac.addr);
1641         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1642                 PMD_INIT_LOG(ERR, "mac address is not valid");
1643                 ret = -EIO;
1644                 goto err_get_mac_addr;
1645         }
1646         /* Copy the permanent MAC address */
1647         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1648                         (struct rte_ether_addr *)hw->mac.perm_addr);
1649
1650         /* Disable flow control */
1651         hw->fc.requested_mode = I40E_FC_NONE;
1652         i40e_set_fc(hw, &aq_fail, TRUE);
1653
1654         /* Set the global registers with default ether type value */
1655         if (!pf->support_multi_driver) {
1656                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1657                                          RTE_ETHER_TYPE_VLAN);
1658                 if (ret != I40E_SUCCESS) {
1659                         PMD_INIT_LOG(ERR,
1660                                      "Failed to set the default outer "
1661                                      "VLAN ether type");
1662                         goto err_setup_pf_switch;
1663                 }
1664         }
1665
1666         /* PF setup, which includes VSI setup */
1667         ret = i40e_pf_setup(pf);
1668         if (ret) {
1669                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1670                 goto err_setup_pf_switch;
1671         }
1672
1673         vsi = pf->main_vsi;
1674
1675         /* Disable double vlan by default */
1676         i40e_vsi_config_double_vlan(vsi, FALSE);
1677
1678         /* Disable S-TAG identification when floating_veb is disabled */
1679         if (!pf->floating_veb) {
1680                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1681                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1682                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1683                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1684                 }
1685         }
1686
1687         if (!vsi->max_macaddrs)
1688                 len = RTE_ETHER_ADDR_LEN;
1689         else
1690                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1691
1692         /* Should be after VSI initialized */
1693         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1694         if (!dev->data->mac_addrs) {
1695                 PMD_INIT_LOG(ERR, "Failed to allocate memory for MAC addresses");
1696                 ret = -ENOMEM;
1697                 goto err_mac_alloc;
1698         }
1699         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1700                                         &dev->data->mac_addrs[0]);
1701
1702         /* Init dcb to sw mode by default */
1703         ret = i40e_dcb_init_configure(dev, TRUE);
1704         if (ret != I40E_SUCCESS) {
1705                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1706                 pf->flags &= ~I40E_FLAG_DCB;
1707         }
1708         /* Update HW struct after DCB configuration */
1709         i40e_get_cap(hw);
1710
1711         /* initialize pf host driver to setup SRIOV resource if applicable */
1712         i40e_pf_host_init(dev);
1713
1714         /* register callback func to eal lib */
1715         rte_intr_callback_register(intr_handle,
1716                                    i40e_dev_interrupt_handler, dev);
1717
1718         /* configure and enable device interrupt */
1719         i40e_pf_config_irq0(hw, TRUE);
1720         i40e_pf_enable_irq0(hw);
1721
1722         /* enable uio intr after callback register */
1723         rte_intr_enable(intr_handle);
1724
1725         /* By default disable flexible payload in global configuration */
1726         if (!pf->support_multi_driver)
1727                 i40e_flex_payload_reg_set_default(hw);
1728
1729         /*
1730          * Add an ethertype filter to drop all flow control frames transmitted
1731          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1732          * frames to wire.
1733          */
1734         i40e_add_tx_flow_control_drop_filter(pf);
1735
1736         /* Set the max frame size to 0x2600 by default,
1737          * in case other drivers changed the default value.
1738          */
1739         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1740
1741         /* initialize mirror rule list */
1742         TAILQ_INIT(&pf->mirror_list);
1743
1744         /* initialize RSS rule list */
1745         TAILQ_INIT(&pf->rss_config_list);
1746
1747         /* initialize Traffic Manager configuration */
1748         i40e_tm_conf_init(dev);
1749
1750         /* Initialize customized information */
1751         i40e_init_customized_info(pf);
1752
1753         /* Initialize the filter invalidation configuration */
1754         i40e_init_filter_invalidation(pf);
1755
1756         ret = i40e_init_ethtype_filter_list(dev);
1757         if (ret < 0)
1758                 goto err_init_ethtype_filter_list;
1759         ret = i40e_init_tunnel_filter_list(dev);
1760         if (ret < 0)
1761                 goto err_init_tunnel_filter_list;
1762         ret = i40e_init_fdir_filter_list(dev);
1763         if (ret < 0)
1764                 goto err_init_fdir_filter_list;
1765
1766         /* initialize queue region configuration */
1767         i40e_init_queue_region_conf(dev);
1768
1769         /* initialize RSS configuration from rte_flow */
1770         memset(&pf->rss_info, 0,
1771                 sizeof(struct i40e_rte_flow_rss_conf));
1772
1773         /* reset all stats of the device, including pf and main vsi */
1774         i40e_dev_stats_reset(dev);
1775
1776         return 0;
1777
1778 err_init_fdir_filter_list:
1779         rte_free(pf->tunnel.hash_table);
1780         rte_free(pf->tunnel.hash_map);
1781 err_init_tunnel_filter_list:
1782         rte_free(pf->ethertype.hash_table);
1783         rte_free(pf->ethertype.hash_map);
1784 err_init_ethtype_filter_list:
1785         rte_free(dev->data->mac_addrs);
1786         dev->data->mac_addrs = NULL;
1787 err_mac_alloc:
1788         i40e_vsi_release(pf->main_vsi);
1789 err_setup_pf_switch:
1790 err_get_mac_addr:
1791 err_configure_lan_hmc:
1792         (void)i40e_shutdown_lan_hmc(hw);
1793 err_init_lan_hmc:
1794         i40e_res_pool_destroy(&pf->msix_pool);
1795 err_msix_pool_init:
1796         i40e_res_pool_destroy(&pf->qp_pool);
1797 err_qp_pool_init:
1798 err_parameter_init:
1799 err_get_capabilities:
1800         (void)i40e_shutdown_adminq(hw);
1801
1802         return ret;
1803 }
1804
1805 static void
1806 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1807 {
1808         struct i40e_ethertype_filter *p_ethertype;
1809         struct i40e_ethertype_rule *ethertype_rule;
1810
1811         ethertype_rule = &pf->ethertype;
1812         /* Remove all ethertype filter rules and hash */
1813         if (ethertype_rule->hash_map)
1814                 rte_free(ethertype_rule->hash_map);
1815         if (ethertype_rule->hash_table)
1816                 rte_hash_free(ethertype_rule->hash_table);
1817
1818         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1819                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1820                              p_ethertype, rules);
1821                 rte_free(p_ethertype);
1822         }
1823 }
1824
1825 static void
1826 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1827 {
1828         struct i40e_tunnel_filter *p_tunnel;
1829         struct i40e_tunnel_rule *tunnel_rule;
1830
1831         tunnel_rule = &pf->tunnel;
1832         /* Remove all tunnel director rules and hash */
1833         if (tunnel_rule->hash_map)
1834                 rte_free(tunnel_rule->hash_map);
1835         if (tunnel_rule->hash_table)
1836                 rte_hash_free(tunnel_rule->hash_table);
1837
1838         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1839                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1840                 rte_free(p_tunnel);
1841         }
1842 }
1843
1844 static void
1845 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1846 {
1847         struct i40e_fdir_filter *p_fdir;
1848         struct i40e_fdir_info *fdir_info;
1849
1850         fdir_info = &pf->fdir;
1851
1852         /* Remove all flow director rules */
1853         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1854                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1855 }
1856
1857 static void
1858 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1859 {
1860         struct i40e_fdir_info *fdir_info;
1861
1862         fdir_info = &pf->fdir;
1863
1864         /* flow director memory cleanup */
1865         if (fdir_info->hash_map)
1866                 rte_free(fdir_info->hash_map);
1867         if (fdir_info->hash_table)
1868                 rte_hash_free(fdir_info->hash_table);
1869         if (fdir_info->fdir_flow_pool.bitmap)
1870                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1871         if (fdir_info->fdir_flow_pool.pool)
1872                 rte_free(fdir_info->fdir_flow_pool.pool);
1873         if (fdir_info->fdir_filter_array)
1874                 rte_free(fdir_info->fdir_filter_array);
1875 }
1876
1877 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1878 {
1879         /*
1880          * Disable by default flexible payload
1881          * for corresponding L2/L3/L4 layers.
1882          */
1883         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1884         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1885         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1886 }
1887
1888 static int
1889 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1890 {
1891         struct i40e_hw *hw;
1892
1893         PMD_INIT_FUNC_TRACE();
1894
1895         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1896                 return 0;
1897
1898         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1899
1900         if (hw->adapter_closed == 0)
1901                 i40e_dev_close(dev);
1902
1903         return 0;
1904 }
1905
1906 static int
1907 i40e_dev_configure(struct rte_eth_dev *dev)
1908 {
1909         struct i40e_adapter *ad =
1910                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1911         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1912         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1913         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1914         int i, ret;
1915
1916         ret = i40e_dev_sync_phy_type(hw);
1917         if (ret)
1918                 return ret;
1919
1920         /* Initialize to TRUE. If any Rx queue doesn't meet the
1921          * bulk allocation or vector Rx preconditions, we will reset it.
1922          */
1923         ad->rx_bulk_alloc_allowed = true;
1924         ad->rx_vec_allowed = true;
1925         ad->tx_simple_allowed = true;
1926         ad->tx_vec_allowed = true;
1927
1928         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1929                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1930
1931         /* Only the legacy filter API needs the following fdir config. So
1932          * when the legacy filter API is deprecated, the following code
1933          * should also be removed.
1934          */
1935         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1936                 ret = i40e_fdir_setup(pf);
1937                 if (ret != I40E_SUCCESS) {
1938                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1939                         return -ENOTSUP;
1940                 }
1941                 ret = i40e_fdir_configure(dev);
1942                 if (ret < 0) {
1943                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1944                         goto err;
1945                 }
1946         } else
1947                 i40e_fdir_teardown(pf);
1948
1949         ret = i40e_dev_init_vlan(dev);
1950         if (ret < 0)
1951                 goto err;
1952
1953         /* VMDQ setup.
1954          *  The general PMD driver call sequence is NIC init, configure,
1955          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will try
1956          *  to look up the VSI that the specific queue belongs to if VMDQ
1957          *  is applicable, so the VMDQ setting has to be done before
1958          *  rx/tx_queue_setup(); this function is a good place for
1959          *  vmdq_setup. The RSS setting needs the actual number of
1960          *  configured RX queues, which is only available after
1961          *  rx_queue_setup(), so dev_start() is a good place for RSS setup.
1962          */
1963         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1964                 ret = i40e_vmdq_setup(dev);
1965                 if (ret)
1966                         goto err;
1967         }
1968
1969         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1970                 ret = i40e_dcb_setup(dev);
1971                 if (ret) {
1972                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1973                         goto err_dcb;
1974                 }
1975         }
1976
1977         TAILQ_INIT(&pf->flow_list);
1978
1979         return 0;
1980
1981 err_dcb:
1982         /* need to release vmdq resources if they exist */
1983         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1984                 i40e_vsi_release(pf->vmdq[i].vsi);
1985                 pf->vmdq[i].vsi = NULL;
1986         }
1987         rte_free(pf->vmdq);
1988         pf->vmdq = NULL;
1989 err:
1990         /* Need to release fdir resources if they exist.
1991          * Only the legacy filter API needs the following fdir config. So
1992          * when the legacy filter API is deprecated, the following code
1993          * should also be removed.
1994          */
1995         i40e_fdir_teardown(pf);
1996         return ret;
1997 }
1998
1999 void
2000 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
2001 {
2002         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2003         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2004         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2005         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2006         uint16_t msix_vect = vsi->msix_intr;
2007         uint16_t i;
2008
2009         for (i = 0; i < vsi->nb_qps; i++) {
2010                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2011                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2012                 rte_wmb();
2013         }
2014
2015         if (vsi->type != I40E_VSI_SRIOV) {
2016                 if (!rte_intr_allow_others(intr_handle)) {
2017                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2018                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2019                         I40E_WRITE_REG(hw,
2020                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2021                                        0);
2022                 } else {
2023                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2024                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2025                         I40E_WRITE_REG(hw,
2026                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2027                                                        msix_vect - 1), 0);
2028                 }
2029         } else {
2030                 uint32_t reg;
2031                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2032                         vsi->user_param + (msix_vect - 1);
2033
2034                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2035                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2036         }
2037         I40E_WRITE_FLUSH(hw);
2038 }
2039
2040 static void
2041 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2042                        int base_queue, int nb_queue,
2043                        uint16_t itr_idx)
2044 {
2045         int i;
2046         uint32_t val;
2047         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2048         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2049
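             /*
              * The hardware walks one vector's queues as a linked list:
              * each QINT_RQCTL entry names the next queue via NEXTQ_INDX,
              * the last entry is terminated with NEXTQ_INDX_MASK, and the
              * LNKLST register written below holds the head (first) queue.
              */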
2050         /* Bind all RX queues to allocated MSIX interrupt */
2051         for (i = 0; i < nb_queue; i++) {
2052                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2053                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2054                         ((base_queue + i + 1) <<
2055                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2056                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2057                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2058
2059                 if (i == nb_queue - 1)
2060                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2061                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2062         }
2063
2064         /* Write first RX queue to Link list register as the head element */
2065         if (vsi->type != I40E_VSI_SRIOV) {
2066                 uint16_t interval =
2067                         i40e_calc_itr_interval(1, pf->support_multi_driver);
2068
2069                 if (msix_vect == I40E_MISC_VEC_ID) {
2070                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2071                                        (base_queue <<
2072                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2073                                        (0x0 <<
2074                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2075                         I40E_WRITE_REG(hw,
2076                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2077                                        interval);
2078                 } else {
2079                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2080                                        (base_queue <<
2081                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2082                                        (0x0 <<
2083                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2084                         I40E_WRITE_REG(hw,
2085                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2086                                                        msix_vect - 1),
2087                                        interval);
2088                 }
2089         } else {
2090                 uint32_t reg;
2091
2092                 if (msix_vect == I40E_MISC_VEC_ID) {
2093                         I40E_WRITE_REG(hw,
2094                                        I40E_VPINT_LNKLST0(vsi->user_param),
2095                                        (base_queue <<
2096                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2097                                        (0x0 <<
2098                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2099                 } else {
2100                         /* num_msix_vectors_vf includes irq0, so exclude it here */
2101                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2102                                 vsi->user_param + (msix_vect - 1);
2103
2104                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2105                                        (base_queue <<
2106                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2107                                        (0x0 <<
2108                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2109                 }
2110         }
2111
2112         I40E_WRITE_FLUSH(hw);
2113 }
2114
2115 int
2116 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2117 {
2118         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2119         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2120         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2121         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2122         uint16_t msix_vect = vsi->msix_intr;
2123         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2124         uint16_t queue_idx = 0;
2125         int record = 0;
2126         int i;
2127
2128         for (i = 0; i < vsi->nb_qps; i++) {
2129                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2130                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2131         }
2132
2133         /* VF bind interrupt */
2134         if (vsi->type == I40E_VSI_SRIOV) {
2135                 if (vsi->nb_msix == 0) {
2136                         PMD_DRV_LOG(ERR, "No msix resource");
2137                         return -EINVAL;
2138                 }
2139                 __vsi_queues_bind_intr(vsi, msix_vect,
2140                                        vsi->base_queue, vsi->nb_qps,
2141                                        itr_idx);
2142                 return 0;
2143         }
2144
2145         /* PF & VMDq bind interrupt */
2146         if (rte_intr_dp_is_en(intr_handle)) {
2147                 if (vsi->type == I40E_VSI_MAIN) {
2148                         queue_idx = 0;
2149                         record = 1;
2150                 } else if (vsi->type == I40E_VSI_VMDQ2) {
2151                         struct i40e_vsi *main_vsi =
2152                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2153                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
2154                         record = 1;
2155                 }
2156         }
2157
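             /*
              * Vector assignment policy: with at most one usable vector,
              * chain all remaining queues onto that single vector in one
              * call; otherwise map queues to vectors 1:1 until the
              * vectors run out.
              */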
2158         for (i = 0; i < vsi->nb_used_qps; i++) {
2159                 if (vsi->nb_msix == 0) {
2160                         PMD_DRV_LOG(ERR, "No msix resource");
2161                         return -EINVAL;
2162                 } else if (nb_msix <= 1) {
2163                         if (!rte_intr_allow_others(intr_handle))
2164                                 /* allow to share MISC_VEC_ID */
2165                                 msix_vect = I40E_MISC_VEC_ID;
2166
2167                         /* not enough msix_vect, map all to one */
2168                         __vsi_queues_bind_intr(vsi, msix_vect,
2169                                                vsi->base_queue + i,
2170                                                vsi->nb_used_qps - i,
2171                                                itr_idx);
2172                         for (; !!record && i < vsi->nb_used_qps; i++)
2173                                 intr_handle->intr_vec[queue_idx + i] =
2174                                         msix_vect;
2175                         break;
2176                 }
2177                 /* 1:1 queue/msix_vect mapping */
2178                 __vsi_queues_bind_intr(vsi, msix_vect,
2179                                        vsi->base_queue + i, 1,
2180                                        itr_idx);
2181                 if (!!record)
2182                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
2183
2184                 msix_vect++;
2185                 nb_msix--;
2186         }
2187
2188         return 0;
2189 }
2190
2191 void
2192 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2193 {
2194         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2195         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2196         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2197         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2198         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2199         uint16_t msix_intr, i;
2200
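             /*
              * Queue vectors start at 1; vector 0 (I40E_MISC_VEC_ID) is
              * the misc/adminq vector driven through PFINT_DYN_CTL0,
              * which is why the PFINT_DYN_CTLN array is indexed with
              * msix_intr - 1.
              */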
2201         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2202                 for (i = 0; i < vsi->nb_msix; i++) {
2203                         msix_intr = vsi->msix_intr + i;
2204                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2205                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2206                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2207                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2208                 }
2209         else
2210                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2211                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2212                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2213                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2214
2215         I40E_WRITE_FLUSH(hw);
2216 }
2217
2218 void
2219 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2220 {
2221         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2222         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2223         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2224         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2225         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2226         uint16_t msix_intr, i;
2227
2228         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2229                 for (i = 0; i < vsi->nb_msix; i++) {
2230                         msix_intr = vsi->msix_intr + i;
2231                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2232                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2233                 }
2234         else
2235                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2236                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2237
2238         I40E_WRITE_FLUSH(hw);
2239 }
2240
2241 static inline uint8_t
2242 i40e_parse_link_speeds(uint16_t link_speeds)
2243 {
2244         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2245
2246         if (link_speeds & ETH_LINK_SPEED_40G)
2247                 link_speed |= I40E_LINK_SPEED_40GB;
2248         if (link_speeds & ETH_LINK_SPEED_25G)
2249                 link_speed |= I40E_LINK_SPEED_25GB;
2250         if (link_speeds & ETH_LINK_SPEED_20G)
2251                 link_speed |= I40E_LINK_SPEED_20GB;
2252         if (link_speeds & ETH_LINK_SPEED_10G)
2253                 link_speed |= I40E_LINK_SPEED_10GB;
2254         if (link_speeds & ETH_LINK_SPEED_1G)
2255                 link_speed |= I40E_LINK_SPEED_1GB;
2256         if (link_speeds & ETH_LINK_SPEED_100M)
2257                 link_speed |= I40E_LINK_SPEED_100MB;
2258
2259         return link_speed;
2260 }
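
     /*
      * For example (an illustrative combination), link_speeds =
      * ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G yields
      * I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB, the firmware's
      * encoding of "advertise 10G or 1G".
      */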
2261
2262 static int
2263 i40e_phy_conf_link(struct i40e_hw *hw,
2264                    uint8_t abilities,
2265                    uint8_t force_speed,
2266                    bool is_up)
2267 {
2268         enum i40e_status_code status;
2269         struct i40e_aq_get_phy_abilities_resp phy_ab;
2270         struct i40e_aq_set_phy_config phy_conf;
2271         enum i40e_aq_phy_type cnt;
2272         uint8_t avail_speed;
2273         uint32_t phy_type_mask = 0;
2274
2275         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2276                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2277                         I40E_AQ_PHY_FLAG_LOW_POWER;
2278
2279         int ret = -ENOTSUP;
2280
2281         /* To get phy capabilities of available speeds. */
2282         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2283                                               NULL);
2284         if (status) {
2285                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2286                                 status);
2287                 return ret;
2288         }
2289         avail_speed = phy_ab.link_speed;
2290
2291         /* To get the current phy config. */
2292         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2293                                               NULL);
2294         if (status) {
2295                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2296                                 status);
2297                 return ret;
2298         }
2299
2300         /* If the link needs to go up, autoneg is enabled, and a speed is
2301          * already configured, there is no need to set it up again.
2302          */
2303         if (is_up && phy_ab.phy_type != 0 &&
2304                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2305                      phy_ab.link_speed != 0)
2306                 return I40E_SUCCESS;
2307
2308         memset(&phy_conf, 0, sizeof(phy_conf));
2309
2310         /* bits 0-2 use the values from get_phy_abilities_resp */
2311         abilities &= ~mask;
2312         abilities |= phy_ab.abilities & mask;
2313
2314         phy_conf.abilities = abilities;
2315
2316         /* If the link needs to go up but the forced speed is not supported,
2317          * warn the user and configure the default available speeds.
2318          */
2319         if (is_up && !(force_speed & avail_speed)) {
2320                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2321                 phy_conf.link_speed = avail_speed;
2322         } else {
2323                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2324         }
2325
2326         /* PHY type mask needs to include each type except PHY type extension */
2327         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2328                 phy_type_mask |= 1 << cnt;
2329
2330         /* use get_phy_abilities_resp value for the rest */
2331         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2332         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2333                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2334                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2335         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2336         phy_conf.eee_capability = phy_ab.eee_capability;
2337         phy_conf.eeer = phy_ab.eeer_val;
2338         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2339
2340         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2341                     phy_ab.abilities, phy_ab.link_speed);
2342         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2343                     phy_conf.abilities, phy_conf.link_speed);
2344
2345         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2346         if (status)
2347                 return ret;
2348
2349         return I40E_SUCCESS;
2350 }
2351
2352 static int
2353 i40e_apply_link_speed(struct rte_eth_dev *dev)
2354 {
2355         uint8_t speed;
2356         uint8_t abilities = 0;
2357         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2358         struct rte_eth_conf *conf = &dev->data->dev_conf;
2359
2360         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2361                      I40E_AQ_PHY_LINK_ENABLED;
2362
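             /*
              * ETH_LINK_SPEED_AUTONEG (0) is expanded to every speed this
              * PMD can advertise, with autoneg enabled; any explicit speed
              * mask keeps only the listed speeds and turns autoneg off.
              */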
2363         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2364                 conf->link_speeds = ETH_LINK_SPEED_40G |
2365                                     ETH_LINK_SPEED_25G |
2366                                     ETH_LINK_SPEED_20G |
2367                                     ETH_LINK_SPEED_10G |
2368                                     ETH_LINK_SPEED_1G |
2369                                     ETH_LINK_SPEED_100M;
2370
2371                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2372         } else {
2373                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2374         }
2375         speed = i40e_parse_link_speeds(conf->link_speeds);
2376
2377         return i40e_phy_conf_link(hw, abilities, speed, true);
2378 }
2379
2380 static int
2381 i40e_dev_start(struct rte_eth_dev *dev)
2382 {
2383         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2384         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2385         struct i40e_vsi *main_vsi = pf->main_vsi;
2386         int ret, i;
2387         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2388         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2389         uint32_t intr_vector = 0;
2390         struct i40e_vsi *vsi;
2391         uint16_t nb_rxq, nb_txq;
2392
2393         hw->adapter_stopped = 0;
2394
2395         rte_intr_disable(intr_handle);
2396
2397         if ((rte_intr_cap_multiple(intr_handle) ||
2398              !RTE_ETH_DEV_SRIOV(dev).active) &&
2399             dev->data->dev_conf.intr_conf.rxq != 0) {
2400                 intr_vector = dev->data->nb_rx_queues;
2401                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2402                 if (ret)
2403                         return ret;
2404         }
2405
2406         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2407                 intr_handle->intr_vec =
2408                         rte_zmalloc("intr_vec",
2409                                     dev->data->nb_rx_queues * sizeof(int),
2410                                     0);
2411                 if (!intr_handle->intr_vec) {
2412                         PMD_INIT_LOG(ERR,
2413                                 "Failed to allocate %d rx_queues intr_vec",
2414                                 dev->data->nb_rx_queues);
2415                         return -ENOMEM;
2416                 }
2417         }
2418
2419         /* Initialize VSI */
2420         ret = i40e_dev_rxtx_init(pf);
2421         if (ret != I40E_SUCCESS) {
2422                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2423                 return ret;
2424         }
2425
2426         /* Map queues with MSIX interrupt */
2427         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2428                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2429         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2430         if (ret < 0)
2431                 return ret;
2432         i40e_vsi_enable_queues_intr(main_vsi);
2433
2434         /* Map VMDQ VSI queues with MSIX interrupt */
2435         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2436                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2437                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2438                                                 I40E_ITR_INDEX_DEFAULT);
2439                 if (ret < 0)
2440                         return ret;
2441                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2442         }
2443
2444         /* Enable all queues which have been configured */
2445         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2446                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2447                 if (ret)
2448                         goto rx_err;
2449         }
2450
2451         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2452                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2453                 if (ret)
2454                         goto tx_err;
2455         }
2456
2457         /* Enable receiving broadcast packets */
2458         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2459         if (ret != I40E_SUCCESS)
2460                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2461
2462         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2463                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2464                                                 true, NULL);
2465                 if (ret != I40E_SUCCESS)
2466                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2467         }
2468
2469         /* Enable the VLAN promiscuous mode. */
2470         if (pf->vfs) {
2471                 for (i = 0; i < pf->vf_num; i++) {
2472                         vsi = pf->vfs[i].vsi;
2473                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2474                                                      true, NULL);
2475                 }
2476         }
2477
2478         /* Enable mac loopback mode */
2479         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2480             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2481                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2482                 if (ret != I40E_SUCCESS) {
2483                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2484                         goto tx_err;
2485                 }
2486         }
2487
2488         /* Apply link configure */
2489         ret = i40e_apply_link_speed(dev);
2490         if (I40E_SUCCESS != ret) {
2491                 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2492                 goto tx_err;
2493         }
2494
2495         if (!rte_intr_allow_others(intr_handle)) {
2496                 rte_intr_callback_unregister(intr_handle,
2497                                              i40e_dev_interrupt_handler,
2498                                              (void *)dev);
2499                 /* configure and enable device interrupt */
2500                 i40e_pf_config_irq0(hw, FALSE);
2501                 i40e_pf_enable_irq0(hw);
2502
2503                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2504                         PMD_INIT_LOG(INFO,
2505                                 "lsc won't be enabled because of no intr multiplex");
2506         } else {
2507                 ret = i40e_aq_set_phy_int_mask(hw,
2508                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2509                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2510                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2511                 if (ret != I40E_SUCCESS)
2512                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2513
2514                 /* Call get_link_info aq command to enable/disable LSE */
2515                 i40e_dev_link_update(dev, 0);
2516         }
2517
2518         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2519                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2520                                   i40e_dev_alarm_handler, dev);
2521         } else {
2522                 /* enable uio intr after callback register */
2523                 rte_intr_enable(intr_handle);
2524         }
2525
2526         i40e_filter_restore(pf);
2527
2528         if (pf->tm_conf.root && !pf->tm_conf.committed)
2529                 PMD_DRV_LOG(WARNING,
2530                             "please call hierarchy_commit() "
2531                             "before starting the port");
2532
2533         return I40E_SUCCESS;
2534
2535 tx_err:
2536         for (i = 0; i < nb_txq; i++)
2537                 i40e_dev_tx_queue_stop(dev, i);
2538 rx_err:
2539         for (i = 0; i < nb_rxq; i++)
2540                 i40e_dev_rx_queue_stop(dev, i);
2541
2542         return ret;
2543 }
2544
2545 static void
2546 i40e_dev_stop(struct rte_eth_dev *dev)
2547 {
2548         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2549         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2550         struct i40e_vsi *main_vsi = pf->main_vsi;
2551         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2552         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2553         int i;
2554
2555         if (hw->adapter_stopped == 1)
2556                 return;
2557
2558         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2559                 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2560                 rte_intr_enable(intr_handle);
2561         }
2562
2563         /* Disable all queues */
2564         for (i = 0; i < dev->data->nb_tx_queues; i++)
2565                 i40e_dev_tx_queue_stop(dev, i);
2566
2567         for (i = 0; i < dev->data->nb_rx_queues; i++)
2568                 i40e_dev_rx_queue_stop(dev, i);
2569
2570         /* Disable queue interrupts and unbind queues from interrupt vectors */
2571         i40e_vsi_disable_queues_intr(main_vsi);
2572         i40e_vsi_queues_unbind_intr(main_vsi);
2573
2574         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2575                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2576                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2577         }
2578
2579         /* Clear all queues and release memory */
2580         i40e_dev_clear_queues(dev);
2581
2582         /* Set link down */
2583         i40e_dev_set_link_down(dev);
2584
2585         if (!rte_intr_allow_others(intr_handle))
2586                 /* restore the default interrupt handler */
2587                 rte_intr_callback_register(intr_handle,
2588                                            i40e_dev_interrupt_handler,
2589                                            (void *)dev);
2590
2591         /* Clean datapath event and queue/vec mapping */
2592         rte_intr_efd_disable(intr_handle);
2593         if (intr_handle->intr_vec) {
2594                 rte_free(intr_handle->intr_vec);
2595                 intr_handle->intr_vec = NULL;
2596         }
2597
2598         /* reset hierarchy commit */
2599         pf->tm_conf.committed = false;
2600
2601         hw->adapter_stopped = 1;
2602
2603         pf->adapter->rss_reta_updated = 0;
2604 }
2605
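/* Close the device: stop it first, then release mirror rules, queues,
 * filters, VSIs and VEBs, shut down the HMC and the admin queue, and
 * finally request a PF software reset so that no stale hardware state
 * survives a later re-initialization. */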
2606 static int
2607 i40e_dev_close(struct rte_eth_dev *dev)
2608 {
2609         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2610         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2611         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2612         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2613         struct i40e_mirror_rule *p_mirror;
2614         struct i40e_filter_control_settings settings;
2615         struct rte_flow *p_flow;
2616         uint32_t reg;
2617         int i;
2618         int ret;
2619         uint8_t aq_fail = 0;
2620         int retries = 0;
2621
2622         PMD_INIT_FUNC_TRACE();
2623
2624         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2625         if (ret)
2626                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2627
2628
2629         i40e_dev_stop(dev);
2630
2631         /* Remove all mirror rules */
2632         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2633                 ret = i40e_aq_del_mirror_rule(hw,
2634                                               pf->main_vsi->veb->seid,
2635                                               p_mirror->rule_type,
2636                                               p_mirror->entries,
2637                                               p_mirror->num_entries,
2638                                               p_mirror->id);
2639                 if (ret < 0)
2640                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2641                                     "status = %d, aq_err = %d.", ret,
2642                                     hw->aq.asq_last_status);
2643
2644                 /* remove the rule's software resources regardless of AQ status */
2645                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2646                 rte_free(p_mirror);
2647                 pf->nb_mirror_rule--;
2648         }
2649
2650         i40e_dev_free_queues(dev);
2651
2652         /* Disable interrupt */
2653         i40e_pf_disable_irq0(hw);
2654         rte_intr_disable(intr_handle);
2655
2656         /*
2657          * Only legacy filter API needs the following fdir config. So when the
2658          * legacy filter API is deprecated, the following code should also be
2659          * removed.
2660          */
2661         i40e_fdir_teardown(pf);
2662
2663         /* shutdown and destroy the HMC */
2664         i40e_shutdown_lan_hmc(hw);
2665
2666         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2667                 i40e_vsi_release(pf->vmdq[i].vsi);
2668                 pf->vmdq[i].vsi = NULL;
2669         }
2670         rte_free(pf->vmdq);
2671         pf->vmdq = NULL;
2672
2673         /* release all the existing VSIs and VEBs */
2674         i40e_vsi_release(pf->main_vsi);
2675
2676         /* shutdown the adminq */
2677         i40e_aq_queue_shutdown(hw, true);
2678         i40e_shutdown_adminq(hw);
2679
2680         i40e_res_pool_destroy(&pf->qp_pool);
2681         i40e_res_pool_destroy(&pf->msix_pool);
2682
2683         /* Disable flexible payload in global configuration */
2684         if (!pf->support_multi_driver)
2685                 i40e_flex_payload_reg_set_default(hw);
2686
2687         /* force a PF reset to clean anything leftover */
2688         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2689         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2690                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2691         I40E_WRITE_FLUSH(hw);
2692
2693         dev->dev_ops = NULL;
2694         dev->rx_pkt_burst = NULL;
2695         dev->tx_pkt_burst = NULL;
2696
2697         /* Clear PXE mode */
2698         i40e_clear_pxe_mode(hw);
2699
2700         /* Unconfigure filter control */
2701         memset(&settings, 0, sizeof(settings));
2702         ret = i40e_set_filter_control(hw, &settings);
2703         if (ret)
2704                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2705                                         ret);
2706
2707         /* Disable flow control */
2708         hw->fc.requested_mode = I40E_FC_NONE;
2709         i40e_set_fc(hw, &aq_fail, TRUE);
2710
2711         /* uninitialize pf host driver */
2712         i40e_pf_host_uninit(dev);
2713
2714         do {
2715                 ret = rte_intr_callback_unregister(intr_handle,
2716                                 i40e_dev_interrupt_handler, dev);
2717                 if (ret >= 0 || ret == -ENOENT) {
2718                         break;
2719                 } else if (ret != -EAGAIN) {
2720                         PMD_INIT_LOG(ERR,
2721                                  "intr callback unregister failed: %d",
2722                                  ret);
2723                 }
2724                 i40e_msec_delay(500);
2725         } while (retries++ < 5);
2726
2727         i40e_rm_ethtype_filter_list(pf);
2728         i40e_rm_tunnel_filter_list(pf);
2729         i40e_rm_fdir_filter_list(pf);
2730
2731         /* Remove all flows */
2732         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2733                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2734                 /* Do not free FDIR flows since they are statically allocated */
2735                 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2736                         rte_free(p_flow);
2737         }
2738
2739         /* release the statically allocated FDIR memory */
2740         i40e_fdir_memory_cleanup(pf);
2741
2742         /* Remove all Traffic Manager configuration */
2743         i40e_tm_conf_uninit(dev);
2744
2745         hw->adapter_closed = 1;
2746         return 0;
2747 }
2748
2749 /*
2750  * Reset PF device only to re-initialize resources in PMD layer
2751  */
2752 static int
2753 i40e_dev_reset(struct rte_eth_dev *dev)
2754 {
2755         int ret;
2756
2757         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2758          * all of its VFs so that they stay aligned with it. The notification
2759          * mechanism is PMD-specific; for the i40e PF it is rather complex.
2760          * To avoid unexpected behavior in the VFs, resetting a PF with
2761          * SR-IOV activated is currently not supported. It might be added later.
2762          */
2763         if (dev->data->sriov.active)
2764                 return -ENOTSUP;
2765
2766         ret = eth_i40e_dev_uninit(dev);
2767         if (ret)
2768                 return ret;
2769
2770         ret = eth_i40e_dev_init(dev, NULL);
2771
2772         return ret;
2773 }
2774
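/* Promiscuous mode is applied with two admin-queue commands, one for
 * unicast and one for multicast. If the second command fails, the first
 * is rolled back so the VSI is never left half-configured. */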
2775 static int
2776 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2777 {
2778         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2779         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2780         struct i40e_vsi *vsi = pf->main_vsi;
2781         int status;
2782
2783         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2784                                                      true, NULL, true);
2785         if (status != I40E_SUCCESS) {
2786                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2787                 return -EAGAIN;
2788         }
2789
2790         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2791                                                         TRUE, NULL);
2792         if (status != I40E_SUCCESS) {
2793                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2794                 /* Rollback unicast promiscuous mode */
2795                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2796                                                     false, NULL, true);
2797                 return -EAGAIN;
2798         }
2799
2800         return 0;
2801 }
2802
2803 static int
2804 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2805 {
2806         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2807         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2808         struct i40e_vsi *vsi = pf->main_vsi;
2809         int status;
2810
2811         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2812                                                      false, NULL, true);
2813         if (status != I40E_SUCCESS) {
2814                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2815                 return -EAGAIN;
2816         }
2817
2818         /* multicast promiscuous must stay on while in all_multicast mode */
2819         if (dev->data->all_multicast == 1)
2820                 return 0;
2821
2822         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2823                                                         false, NULL);
2824         if (status != I40E_SUCCESS) {
2825                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2826                 /* Rollback unicast promiscuous mode */
2827                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2828                                                     true, NULL, true);
2829                 return -EAGAIN;
2830         }
2831
2832         return 0;
2833 }
2834
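/* all_multicast only toggles multicast promiscuous on the main VSI. The
 * disable path below is a no-op while promiscuous mode is active, since
 * promiscuous mode requires multicast promiscuous to stay on. */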
2835 static int
2836 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2837 {
2838         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2839         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2840         struct i40e_vsi *vsi = pf->main_vsi;
2841         int ret;
2842
2843         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2844         if (ret != I40E_SUCCESS) {
2845                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2846                 return -EAGAIN;
2847         }
2848
2849         return 0;
2850 }
2851
2852 static int
2853 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2854 {
2855         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2856         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2857         struct i40e_vsi *vsi = pf->main_vsi;
2858         int ret;
2859
2860         if (dev->data->promiscuous == 1)
2861                 return 0; /* promiscuous mode keeps multicast promiscuous on */
2862
2863         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2864                                 vsi->seid, FALSE, NULL);
2865         if (ret != I40E_SUCCESS) {
2866                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2867                 return -EAGAIN;
2868         }
2869
2870         return 0;
2871 }
2872
2873 /*
2874  * Set device link up.
2875  */
2876 static int
2877 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2878 {
2879         /* re-apply link speed setting */
2880         return i40e_apply_link_speed(dev);
2881 }
2882
2883 /*
2884  * Set device link down.
2885  */
2886 static int
2887 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2888 {
2889         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2890         uint8_t abilities = 0;
2891         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2892
2893         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2894         return i40e_phy_conf_link(hw, abilities, speed, false);
2895 }
2896
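/* Read the link status directly from the PRTMAC registers, avoiding an
 * admin-queue round trip. Used on the fast path of i40e_dev_link_update()
 * when the caller does not wait and link status events are disabled. Note
 * that the speed encoding differs between X722 and the other MAC types. */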
2897 static __rte_always_inline void
2898 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2899 {
2900 /* Link status registers and values */
2901 #define I40E_PRTMAC_LINKSTA             0x001E2420
2902 #define I40E_REG_LINK_UP                0x40000080
2903 #define I40E_PRTMAC_MACC                0x001E24E0
2904 #define I40E_REG_MACC_25GB              0x00020000
2905 #define I40E_REG_SPEED_MASK             0x38000000
2906 #define I40E_REG_SPEED_0                0x00000000
2907 #define I40E_REG_SPEED_1                0x08000000
2908 #define I40E_REG_SPEED_2                0x10000000
2909 #define I40E_REG_SPEED_3                0x18000000
2910 #define I40E_REG_SPEED_4                0x20000000
2911         uint32_t link_speed;
2912         uint32_t reg_val;
2913
2914         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2915         link_speed = reg_val & I40E_REG_SPEED_MASK;
2916         reg_val &= I40E_REG_LINK_UP;
2917         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2918
2919         if (unlikely(link->link_status == 0))
2920                 return;
2921
2922         /* Parse the link status */
2923         switch (link_speed) {
2924         case I40E_REG_SPEED_0:
2925                 link->link_speed = ETH_SPEED_NUM_100M;
2926                 break;
2927         case I40E_REG_SPEED_1:
2928                 link->link_speed = ETH_SPEED_NUM_1G;
2929                 break;
2930         case I40E_REG_SPEED_2:
2931                 if (hw->mac.type == I40E_MAC_X722)
2932                         link->link_speed = ETH_SPEED_NUM_2_5G;
2933                 else
2934                         link->link_speed = ETH_SPEED_NUM_10G;
2935                 break;
2936         case I40E_REG_SPEED_3:
2937                 if (hw->mac.type == I40E_MAC_X722) {
2938                         link->link_speed = ETH_SPEED_NUM_5G;
2939                 } else {
2940                         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2941
2942                         if (reg_val & I40E_REG_MACC_25GB)
2943                                 link->link_speed = ETH_SPEED_NUM_25G;
2944                         else
2945                                 link->link_speed = ETH_SPEED_NUM_40G;
2946                 }
2947                 break;
2948         case I40E_REG_SPEED_4:
2949                 if (hw->mac.type == I40E_MAC_X722)
2950                         link->link_speed = ETH_SPEED_NUM_10G;
2951                 else
2952                         link->link_speed = ETH_SPEED_NUM_20G;
2953                 break;
2954         default:
2955                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2956                 break;
2957         }
2958 }
2959
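/* Query the link status via the get_link_info admin-queue command. When
 * wait_to_complete is set, poll up to MAX_REPEAT_TIME * CHECK_INTERVAL ms
 * for the link to come up; enable_lse additionally (re)arms link status
 * event reporting in firmware. */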
2960 static __rte_always_inline void
2961 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2962         bool enable_lse, int wait_to_complete)
2963 {
2964 #define CHECK_INTERVAL             100  /* 100ms */
2965 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2966         uint32_t rep_cnt = MAX_REPEAT_TIME;
2967         struct i40e_link_status link_status;
2968         int status;
2969
2972         do {
2973                 memset(&link_status, 0, sizeof(link_status));
2974
2975                 /* Get link status information from hardware */
2976                 status = i40e_aq_get_link_info(hw, enable_lse,
2977                                                 &link_status, NULL);
2978                 if (unlikely(status != I40E_SUCCESS)) {
2979                         link->link_speed = ETH_SPEED_NUM_NONE;
2980                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2981                         PMD_DRV_LOG(ERR, "Failed to get link info");
2982                         return;
2983                 }
2984
2985                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2986                 if (!wait_to_complete || link->link_status)
2987                         break;
2988
2989                 rte_delay_ms(CHECK_INTERVAL);
2990         } while (--rep_cnt);
2991
2992         /* Parse the link status */
2993         switch (link_status.link_speed) {
2994         case I40E_LINK_SPEED_100MB:
2995                 link->link_speed = ETH_SPEED_NUM_100M;
2996                 break;
2997         case I40E_LINK_SPEED_1GB:
2998                 link->link_speed = ETH_SPEED_NUM_1G;
2999                 break;
3000         case I40E_LINK_SPEED_10GB:
3001                 link->link_speed = ETH_SPEED_NUM_10G;
3002                 break;
3003         case I40E_LINK_SPEED_20GB:
3004                 link->link_speed = ETH_SPEED_NUM_20G;
3005                 break;
3006         case I40E_LINK_SPEED_25GB:
3007                 link->link_speed = ETH_SPEED_NUM_25G;
3008                 break;
3009         case I40E_LINK_SPEED_40GB:
3010                 link->link_speed = ETH_SPEED_NUM_40G;
3011                 break;
3012         default:
3013                 if (link->link_status)
3014                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3015                 else
3016                         link->link_speed = ETH_SPEED_NUM_NONE;
3017                 break;
3018         }
3019 }
3020
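/* Top-level link update: take the register-based fast path when the
 * caller does not wait and link status events are disabled, otherwise
 * query through the admin queue; then publish the new link state and
 * notify all VFs. */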
3021 int
3022 i40e_dev_link_update(struct rte_eth_dev *dev,
3023                      int wait_to_complete)
3024 {
3025         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3026         struct rte_eth_link link;
3027         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3028         int ret;
3029
3030         memset(&link, 0, sizeof(link));
3031
3032         /* i40e uses full duplex only */
3033         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3034         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3035                         ETH_LINK_SPEED_FIXED);
3036
3037         if (!wait_to_complete && !enable_lse)
3038                 update_link_reg(hw, &link);
3039         else
3040                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3041
3042         if (hw->switch_dev)
3043                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3044
3045         ret = rte_eth_linkstatus_set(dev, &link);
3046         i40e_notify_all_vfs_link_status(dev);
3047
3048         return ret;
3049 }
3050
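/* The hardware counters are only 48 bits wide; bits 48..63 of *prev_stat
 * hold software-accumulated overflow. Worked example of the roll-over
 * logic below, assuming the previous 64-bit value was 0x0001FFFFFFFFFFFE
 * and the fresh 48-bit reading is 0x000000000010:
 *   - the low 48 bits of prev (0xFFFFFFFFFFFE) exceed the new reading, so
 *     the counter wrapped and 1 << 48 is added;
 *   - the overflow bits of prev (0x0001 << 48) are then carried over,
 *     keeping the 64-bit value monotonically increasing across wraps. */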
3051 static void
3052 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3053                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3054                           uint64_t *stat, uint64_t *prev_stat)
3055 {
3056         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3057         /* extend the 48-bit counter to 64 bits, handling roll-over */
3058         if (offset_loaded) {
3059                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3060                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3061                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3062         }
3063         *prev_stat = *stat;
3064 }
3065
3066 /* Get all the statistics of a VSI */
3067 void
3068 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3069 {
3070         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3071         struct i40e_eth_stats *nes = &vsi->eth_stats;
3072         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3073         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3074
3075         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3076                                   vsi->offset_loaded, &oes->rx_bytes,
3077                                   &nes->rx_bytes, &vsi->prev_rx_bytes);
3078         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3079                             vsi->offset_loaded, &oes->rx_unicast,
3080                             &nes->rx_unicast);
3081         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3082                             vsi->offset_loaded, &oes->rx_multicast,
3083                             &nes->rx_multicast);
3084         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3085                             vsi->offset_loaded, &oes->rx_broadcast,
3086                             &nes->rx_broadcast);
3087         /* exclude CRC bytes */
3088         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3089                 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3090
3091         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3092                             &oes->rx_discards, &nes->rx_discards);
3093         /* GLV_REPC not supported */
3094         /* GLV_RMPC not supported */
3095         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3096                             &oes->rx_unknown_protocol,
3097                             &nes->rx_unknown_protocol);
3098         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3099                                   vsi->offset_loaded, &oes->tx_bytes,
3100                                   &nes->tx_bytes, &vsi->prev_tx_bytes);
3101         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3102                             vsi->offset_loaded, &oes->tx_unicast,
3103                             &nes->tx_unicast);
3104         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3105                             vsi->offset_loaded, &oes->tx_multicast,
3106                             &nes->tx_multicast);
3107         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3108                             vsi->offset_loaded,  &oes->tx_broadcast,
3109                             &nes->tx_broadcast);
3110         /* GLV_TDPC not supported */
3111         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3112                             &oes->tx_errors, &nes->tx_errors);
3113         vsi->offset_loaded = true;
3114
3115         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3116                     vsi->vsi_id);
3117         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3118         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3119         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3120         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3121         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3122         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3123                     nes->rx_unknown_protocol);
3124         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3125         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3126         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3127         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3128         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3129         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3130         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3131                     vsi->vsi_id);
3132 }
3133
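/* Refresh pf->stats from the GLPRT port-level counters. The GLV counters
 * indexed by hw->port track internally transferred packets; their values
 * are subtracted so the port statistics exclude that internal traffic. */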
3134 static void
3135 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3136 {
3137         unsigned int i;
3138         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3139         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3140
3141         /* Get rx/tx bytes of internal transfer packets */
3142         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3143                                   I40E_GLV_GORCL(hw->port),
3144                                   pf->offset_loaded,
3145                                   &pf->internal_stats_offset.rx_bytes,
3146                                   &pf->internal_stats.rx_bytes,
3147                                   &pf->internal_prev_rx_bytes);
3148         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3149                                   I40E_GLV_GOTCL(hw->port),
3150                                   pf->offset_loaded,
3151                                   &pf->internal_stats_offset.tx_bytes,
3152                                   &pf->internal_stats.tx_bytes,
3153                                   &pf->internal_prev_tx_bytes);
3154         /* Get total internal rx packet count */
3155         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3156                             I40E_GLV_UPRCL(hw->port),
3157                             pf->offset_loaded,
3158                             &pf->internal_stats_offset.rx_unicast,
3159                             &pf->internal_stats.rx_unicast);
3160         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3161                             I40E_GLV_MPRCL(hw->port),
3162                             pf->offset_loaded,
3163                             &pf->internal_stats_offset.rx_multicast,
3164                             &pf->internal_stats.rx_multicast);
3165         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3166                             I40E_GLV_BPRCL(hw->port),
3167                             pf->offset_loaded,
3168                             &pf->internal_stats_offset.rx_broadcast,
3169                             &pf->internal_stats.rx_broadcast);
3170         /* Get total internal tx packet count */
3171         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3172                             I40E_GLV_UPTCL(hw->port),
3173                             pf->offset_loaded,
3174                             &pf->internal_stats_offset.tx_unicast,
3175                             &pf->internal_stats.tx_unicast);
3176         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3177                             I40E_GLV_MPTCL(hw->port),
3178                             pf->offset_loaded,
3179                             &pf->internal_stats_offset.tx_multicast,
3180                             &pf->internal_stats.tx_multicast);
3181         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3182                             I40E_GLV_BPTCL(hw->port),
3183                             pf->offset_loaded,
3184                             &pf->internal_stats_offset.tx_broadcast,
3185                             &pf->internal_stats.tx_broadcast);
3186
3187         /* exclude CRC size */
3188         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3189                 pf->internal_stats.rx_multicast +
3190                 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3191
3192         /* Get statistics of struct i40e_eth_stats */
3193         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3194                                   I40E_GLPRT_GORCL(hw->port),
3195                                   pf->offset_loaded, &os->eth.rx_bytes,
3196                                   &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3197         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3198                             I40E_GLPRT_UPRCL(hw->port),
3199                             pf->offset_loaded, &os->eth.rx_unicast,
3200                             &ns->eth.rx_unicast);
3201         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3202                             I40E_GLPRT_MPRCL(hw->port),
3203                             pf->offset_loaded, &os->eth.rx_multicast,
3204                             &ns->eth.rx_multicast);
3205         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3206                             I40E_GLPRT_BPRCL(hw->port),
3207                             pf->offset_loaded, &os->eth.rx_broadcast,
3208                             &ns->eth.rx_broadcast);
3209         /* Workaround: CRC size should not be included in byte statistics,
3210          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3211          * packet.
3212          */
3213         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3214                 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3215
3216         /* Exclude internal Rx bytes.
3217          * Workaround: I40E_GLV_GORC[H/L] may be updated before
3218          * I40E_GLPRT_GORC[H/L], so there is a small window in which the
3219          * subtraction below could yield a negative value. The same applies
3220          * to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
3221          */
3222         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3223                 ns->eth.rx_bytes = 0;
3224         else
3225                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3226
3227         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3228                 ns->eth.rx_unicast = 0;
3229         else
3230                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3231
3232         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3233                 ns->eth.rx_multicast = 0;
3234         else
3235                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3236
3237         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3238                 ns->eth.rx_broadcast = 0;
3239         else
3240                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3241
3242         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3243                             pf->offset_loaded, &os->eth.rx_discards,
3244                             &ns->eth.rx_discards);
3245         /* GLPRT_REPC not supported */
3246         /* GLPRT_RMPC not supported */
3247         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3248                             pf->offset_loaded,
3249                             &os->eth.rx_unknown_protocol,
3250                             &ns->eth.rx_unknown_protocol);
3251         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3252                                   I40E_GLPRT_GOTCL(hw->port),
3253                                   pf->offset_loaded, &os->eth.tx_bytes,
3254                                   &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3255         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3256                             I40E_GLPRT_UPTCL(hw->port),
3257                             pf->offset_loaded, &os->eth.tx_unicast,
3258                             &ns->eth.tx_unicast);
3259         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3260                             I40E_GLPRT_MPTCL(hw->port),
3261                             pf->offset_loaded, &os->eth.tx_multicast,
3262                             &ns->eth.tx_multicast);
3263         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3264                             I40E_GLPRT_BPTCL(hw->port),
3265                             pf->offset_loaded, &os->eth.tx_broadcast,
3266                             &ns->eth.tx_broadcast);
3267         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3268                 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3269
3270         /* Exclude internal Tx bytes.
3271          * Workaround: I40E_GLV_GOTC[H/L] may be updated before
3272          * I40E_GLPRT_GOTC[H/L], so there is a small window in which the
3273          * subtraction below could yield a negative value. The same applies
3274          * to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
3275          */
3276         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3277                 ns->eth.tx_bytes = 0;
3278         else
3279                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3280
3281         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3282                 ns->eth.tx_unicast = 0;
3283         else
3284                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3285
3286         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3287                 ns->eth.tx_multicast = 0;
3288         else
3289                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3290
3291         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3292                 ns->eth.tx_broadcast = 0;
3293         else
3294                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3295
3296         /* GLPRT_TEPC not supported */
3297
3298         /* additional port specific stats */
3299         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3300                             pf->offset_loaded, &os->tx_dropped_link_down,
3301                             &ns->tx_dropped_link_down);
3302         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3303                             pf->offset_loaded, &os->crc_errors,
3304                             &ns->crc_errors);
3305         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3306                             pf->offset_loaded, &os->illegal_bytes,
3307                             &ns->illegal_bytes);
3308         /* GLPRT_ERRBC not supported */
3309         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3310                             pf->offset_loaded, &os->mac_local_faults,
3311                             &ns->mac_local_faults);
3312         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3313                             pf->offset_loaded, &os->mac_remote_faults,
3314                             &ns->mac_remote_faults);
3315         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3316                             pf->offset_loaded, &os->rx_length_errors,
3317                             &ns->rx_length_errors);
3318         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3319                             pf->offset_loaded, &os->link_xon_rx,
3320                             &ns->link_xon_rx);
3321         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3322                             pf->offset_loaded, &os->link_xoff_rx,
3323                             &ns->link_xoff_rx);
3324         for (i = 0; i < 8; i++) {
3325                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3326                                     pf->offset_loaded,
3327                                     &os->priority_xon_rx[i],
3328                                     &ns->priority_xon_rx[i]);
3329                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3330                                     pf->offset_loaded,
3331                                     &os->priority_xoff_rx[i],
3332                                     &ns->priority_xoff_rx[i]);
3333         }
3334         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3335                             pf->offset_loaded, &os->link_xon_tx,
3336                             &ns->link_xon_tx);
3337         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3338                             pf->offset_loaded, &os->link_xoff_tx,
3339                             &ns->link_xoff_tx);
3340         for (i = 0; i < 8; i++) {
3341                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3342                                     pf->offset_loaded,
3343                                     &os->priority_xon_tx[i],
3344                                     &ns->priority_xon_tx[i]);
3345                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3346                                     pf->offset_loaded,
3347                                     &os->priority_xoff_tx[i],
3348                                     &ns->priority_xoff_tx[i]);
3349                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3350                                     pf->offset_loaded,
3351                                     &os->priority_xon_2_xoff[i],
3352                                     &ns->priority_xon_2_xoff[i]);
3353         }
3354         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3355                             I40E_GLPRT_PRC64L(hw->port),
3356                             pf->offset_loaded, &os->rx_size_64,
3357                             &ns->rx_size_64);
3358         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3359                             I40E_GLPRT_PRC127L(hw->port),
3360                             pf->offset_loaded, &os->rx_size_127,
3361                             &ns->rx_size_127);
3362         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3363                             I40E_GLPRT_PRC255L(hw->port),
3364                             pf->offset_loaded, &os->rx_size_255,
3365                             &ns->rx_size_255);
3366         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3367                             I40E_GLPRT_PRC511L(hw->port),
3368                             pf->offset_loaded, &os->rx_size_511,
3369                             &ns->rx_size_511);
3370         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3371                             I40E_GLPRT_PRC1023L(hw->port),
3372                             pf->offset_loaded, &os->rx_size_1023,
3373                             &ns->rx_size_1023);
3374         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3375                             I40E_GLPRT_PRC1522L(hw->port),
3376                             pf->offset_loaded, &os->rx_size_1522,
3377                             &ns->rx_size_1522);
3378         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3379                             I40E_GLPRT_PRC9522L(hw->port),
3380                             pf->offset_loaded, &os->rx_size_big,
3381                             &ns->rx_size_big);
3382         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3383                             pf->offset_loaded, &os->rx_undersize,
3384                             &ns->rx_undersize);
3385         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3386                             pf->offset_loaded, &os->rx_fragments,
3387                             &ns->rx_fragments);
3388         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3389                             pf->offset_loaded, &os->rx_oversize,
3390                             &ns->rx_oversize);
3391         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3392                             pf->offset_loaded, &os->rx_jabber,
3393                             &ns->rx_jabber);
3394         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3395                             I40E_GLPRT_PTC64L(hw->port),
3396                             pf->offset_loaded, &os->tx_size_64,
3397                             &ns->tx_size_64);
3398         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3399                             I40E_GLPRT_PTC127L(hw->port),
3400                             pf->offset_loaded, &os->tx_size_127,
3401                             &ns->tx_size_127);
3402         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3403                             I40E_GLPRT_PTC255L(hw->port),
3404                             pf->offset_loaded, &os->tx_size_255,
3405                             &ns->tx_size_255);
3406         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3407                             I40E_GLPRT_PTC511L(hw->port),
3408                             pf->offset_loaded, &os->tx_size_511,
3409                             &ns->tx_size_511);
3410         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3411                             I40E_GLPRT_PTC1023L(hw->port),
3412                             pf->offset_loaded, &os->tx_size_1023,
3413                             &ns->tx_size_1023);
3414         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3415                             I40E_GLPRT_PTC1522L(hw->port),
3416                             pf->offset_loaded, &os->tx_size_1522,
3417                             &ns->tx_size_1522);
3418         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3419                             I40E_GLPRT_PTC9522L(hw->port),
3420                             pf->offset_loaded, &os->tx_size_big,
3421                             &ns->tx_size_big);
3422         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3423                            pf->offset_loaded,
3424                            &os->fd_sb_match, &ns->fd_sb_match);
3425         /* GLPRT_MSPDC not supported */
3426         /* GLPRT_XEC not supported */
3427
3428         pf->offset_loaded = true;
3429
3430         if (pf->main_vsi)
3431                 i40e_update_vsi_stats(pf->main_vsi);
3432 }
3433
3434 /* Get all statistics of a port */
3435 static int
3436 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3437 {
3438         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3439         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3440         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3441         struct i40e_vsi *vsi;
3442         unsigned i;
3443
3444         /* read the registers; this refreshes the values held in the stats structs */
3445         i40e_read_stats_registers(pf, hw);
3446
3447         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3448                         pf->main_vsi->eth_stats.rx_multicast +
3449                         pf->main_vsi->eth_stats.rx_broadcast -
3450                         pf->main_vsi->eth_stats.rx_discards;
3451         stats->opackets = ns->eth.tx_unicast +
3452                         ns->eth.tx_multicast +
3453                         ns->eth.tx_broadcast;
3454         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3455         stats->obytes   = ns->eth.tx_bytes;
3456         stats->oerrors  = ns->eth.tx_errors +
3457                         pf->main_vsi->eth_stats.tx_errors;
3458
3459         /* Rx Errors */
3460         stats->imissed  = ns->eth.rx_discards +
3461                         pf->main_vsi->eth_stats.rx_discards;
3462         stats->ierrors  = ns->crc_errors +
3463                         ns->rx_length_errors + ns->rx_undersize +
3464                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3465
3466         if (pf->vfs) {
3467                 for (i = 0; i < pf->vf_num; i++) {
3468                         vsi = pf->vfs[i].vsi;
3469                         i40e_update_vsi_stats(vsi);
3470
3471                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3472                                         vsi->eth_stats.rx_multicast +
3473                                         vsi->eth_stats.rx_broadcast -
3474                                         vsi->eth_stats.rx_discards);
3475                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3476                         stats->oerrors  += vsi->eth_stats.tx_errors;
3477                         stats->imissed  += vsi->eth_stats.rx_discards;
3478                 }
3479         }
3480
3481         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3482         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3483         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3484         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3485         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3486         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3487         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3488                     ns->eth.rx_unknown_protocol);
3489         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3490         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3491         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3492         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3493         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3494         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3495
3496         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3497                     ns->tx_dropped_link_down);
3498         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3499         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3500                     ns->illegal_bytes);
3501         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3502         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3503                     ns->mac_local_faults);
3504         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3505                     ns->mac_remote_faults);
3506         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3507                     ns->rx_length_errors);
3508         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3509         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3510         for (i = 0; i < 8; i++) {
3511                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3512                                 i, ns->priority_xon_rx[i]);
3513                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3514                                 i, ns->priority_xoff_rx[i]);
3515         }
3516         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3517         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3518         for (i = 0; i < 8; i++) {
3519                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3520                                 i, ns->priority_xon_tx[i]);
3521                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3522                                 i, ns->priority_xoff_tx[i]);
3523                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3524                                 i, ns->priority_xon_2_xoff[i]);
3525         }
3526         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3527         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3528         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3529         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3530         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3531         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3532         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3533         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3534         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3535         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3536         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3537         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3538         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3539         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3540         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3541         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3542         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3543         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3544         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3545                         ns->mac_short_packet_dropped);
3546         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3547                     ns->checksum_error);
3548         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3549         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3550         return 0;
3551 }
3552
3553 /* Reset the statistics */
3554 static int
3555 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3556 {
3557         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3558         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3559
3560         /* Mark PF and VSI stats to update the offset, aka "reset" */
3561         pf->offset_loaded = false;
3562         if (pf->main_vsi)
3563                 pf->main_vsi->offset_loaded = false;
3564
3565         /* read the stats; the current register values become the new offsets */
3566         i40e_read_stats_registers(pf, hw);
3567
3568         return 0;
3569 }
3570
3571 static uint32_t
3572 i40e_xstats_calc_num(void)
3573 {
3574         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3575                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3576                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3577 }
3578
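/* xstats names follow the usual ethdev two-pass convention: a NULL array
 * returns the required count, a second call fills in the names. The
 * per-priority queue stats are expanded into 8 entries each. */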
3579 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3580                                      struct rte_eth_xstat_name *xstats_names,
3581                                      __rte_unused unsigned limit)
3582 {
3583         unsigned count = 0;
3584         unsigned i, prio;
3585
3586         if (xstats_names == NULL)
3587                 return i40e_xstats_calc_num();
3588
3589         /* Note: limit checked in rte_eth_xstats_get_names() */
3590
3591         /* Get stats from i40e_eth_stats struct */
3592         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3593                 strlcpy(xstats_names[count].name,
3594                         rte_i40e_stats_strings[i].name,
3595                         sizeof(xstats_names[count].name));
3596                 count++;
3597         }
3598
3599         /* Get individual stats from i40e_hw_port struct */
3600         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3601                 strlcpy(xstats_names[count].name,
3602                         rte_i40e_hw_port_strings[i].name,
3603                         sizeof(xstats_names[count].name));
3604                 count++;
3605         }
3606
3607         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3608                 for (prio = 0; prio < 8; prio++) {
3609                         snprintf(xstats_names[count].name,
3610                                  sizeof(xstats_names[count].name),
3611                                  "rx_priority%u_%s", prio,
3612                                  rte_i40e_rxq_prio_strings[i].name);
3613                         count++;
3614                 }
3615         }
3616
3617         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3618                 for (prio = 0; prio < 8; prio++) {
3619                         snprintf(xstats_names[count].name,
3620                                  sizeof(xstats_names[count].name),
3621                                  "tx_priority%u_%s", prio,
3622                                  rte_i40e_txq_prio_strings[i].name);
3623                         count++;
3624                 }
3625         }
3626         return count;
3627 }
3628
3629 static int
3630 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3631                     unsigned n)
3632 {
3633         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3634         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3635         unsigned i, count, prio;
3636         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3637
3638         count = i40e_xstats_calc_num();
3639         if (n < count)
3640                 return count;
3641
3642         i40e_read_stats_registers(pf, hw);
3643
3644         if (xstats == NULL)
3645                 return 0;
3646
3647         count = 0;
3648
3649         /* Get stats from i40e_eth_stats struct */
3650         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3651                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3652                         rte_i40e_stats_strings[i].offset);
3653                 xstats[count].id = count;
3654                 count++;
3655         }
3656
3657         /* Get individual stats from i40e_hw_port struct */
3658         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3659                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3660                         rte_i40e_hw_port_strings[i].offset);
3661                 xstats[count].id = count;
3662                 count++;
3663         }
3664
3665         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3666                 for (prio = 0; prio < 8; prio++) {
3667                         xstats[count].value =
3668                                 *(uint64_t *)(((char *)hw_stats) +
3669                                 rte_i40e_rxq_prio_strings[i].offset +
3670                                 (sizeof(uint64_t) * prio));
3671                         xstats[count].id = count;
3672                         count++;
3673                 }
3674         }
3675
3676         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3677                 for (prio = 0; prio < 8; prio++) {
3678                         xstats[count].value =
3679                                 *(uint64_t *)(((char *)hw_stats) +
3680                                 rte_i40e_txq_prio_strings[i].offset +
3681                                 (sizeof(uint64_t) * prio));
3682                         xstats[count].id = count;
3683                         count++;
3684                 }
3685         }
3686
3687         return count;
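/* Illustrative usage sketch (application side, not part of this driver),
 * assuming port_id names an initialized port. The first call sizes the
 * array, the second fills it; xs[i].id indexes the matching name entry:
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *     if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n) {
 *             ... consume xs[0..n-1] ...
 *     }
 *     free(xs);
 */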
3688 }
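/* Build the firmware version string. hw->nvm.oem_ver packs the OEM
 * version as 8-bit version, 16-bit build and 8-bit patch fields; the NVM
 * map version and the EETRACK id are reported alongside it. */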
3689
3690 static int
3691 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3692 {
3693         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3694         u32 full_ver;
3695         u8 ver, patch;
3696         u16 build;
3697         int ret;
3698
3699         full_ver = hw->nvm.oem_ver;
3700         ver = (u8)(full_ver >> 24);
3701         build = (u16)((full_ver >> 8) & 0xffff);
3702         patch = (u8)(full_ver & 0xff);
3703
3704         ret = snprintf(fw_version, fw_size,
3705                  "%d.%d%d 0x%08x %d.%d.%d",
3706                  ((hw->nvm.version >> 12) & 0xf),
3707                  ((hw->nvm.version >> 4) & 0xff),
3708                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3709                  ver, build, patch);
3710
3711         ret += 1; /* account for the terminating '\0' */
3712         if (fw_size < (u32)ret)
3713                 return ret;
3714         else
3715                 return 0;
3716 }
3717
3718 /*
3719  * With NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) and later,
3720  * the Rx data path does not hang when FW LLDP is stopped.
3721  * Return true if LLDP needs to be stopped.
3722  * Return false if LLDP cannot be disabled without blocking the Rx data path.
3723  */
3724 static bool
3725 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3726 {
3727         double nvm_ver;
3728         char ver_str[64] = {0};
3729         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3730
3731         i40e_fw_version_get(dev, ver_str, 64);
3732         nvm_ver = atof(ver_str);
3733         if ((hw->mac.type == I40E_MAC_X722 ||
3734              hw->mac.type == I40E_MAC_X722_VF) &&
3735              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3736                 return true;
3737         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3738                 return true;
3739
3740         return false;
3741 }
3742
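/* Report device capabilities to the ethdev layer: queue and MAC-address
 * limits taken from the main VSI, the supported Rx/Tx offload flags, RSS
 * parameters, and the recommended descriptor-ring and threshold defaults. */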
3743 static int
3744 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3745 {
3746         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3747         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3748         struct i40e_vsi *vsi = pf->main_vsi;
3749         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3750
3751         dev_info->max_rx_queues = vsi->nb_qps;
3752         dev_info->max_tx_queues = vsi->nb_qps;
3753         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3754         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3755         dev_info->max_mac_addrs = vsi->max_macaddrs;
3756         dev_info->max_vfs = pci_dev->max_vfs;
3757         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3758         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3759         dev_info->rx_queue_offload_capa = 0;
3760         dev_info->rx_offload_capa =
3761                 DEV_RX_OFFLOAD_VLAN_STRIP |
3762                 DEV_RX_OFFLOAD_QINQ_STRIP |
3763                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3764                 DEV_RX_OFFLOAD_UDP_CKSUM |
3765                 DEV_RX_OFFLOAD_TCP_CKSUM |
3766                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3767                 DEV_RX_OFFLOAD_KEEP_CRC |
3768                 DEV_RX_OFFLOAD_SCATTER |
3769                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3770                 DEV_RX_OFFLOAD_VLAN_FILTER |
3771                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3772                 DEV_RX_OFFLOAD_RSS_HASH;
3773
3774         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3775         dev_info->tx_offload_capa =
3776                 DEV_TX_OFFLOAD_VLAN_INSERT |
3777                 DEV_TX_OFFLOAD_QINQ_INSERT |
3778                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3779                 DEV_TX_OFFLOAD_UDP_CKSUM |
3780                 DEV_TX_OFFLOAD_TCP_CKSUM |
3781                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3782                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3783                 DEV_TX_OFFLOAD_TCP_TSO |
3784                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3785                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3786                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3787                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3788                 DEV_TX_OFFLOAD_MULTI_SEGS |
3789                 dev_info->tx_queue_offload_capa;
3790         dev_info->dev_capa =
3791                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3792                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3793
3794         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3795                                                 sizeof(uint32_t);
3796         dev_info->reta_size = pf->hash_lut_size;
3797         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3798
3799         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3800                 .rx_thresh = {
3801                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3802                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3803                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3804                 },
3805                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3806                 .rx_drop_en = 0,
3807                 .offloads = 0,
3808         };
3809
3810         dev_info->default_txconf = (struct rte_eth_txconf) {
3811                 .tx_thresh = {
3812                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3813                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3814                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3815                 },
3816                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3817                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3818                 .offloads = 0,
3819         };
3820
3821         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3822                 .nb_max = I40E_MAX_RING_DESC,
3823                 .nb_min = I40E_MIN_RING_DESC,
3824                 .nb_align = I40E_ALIGN_RING_DESC,
3825         };
3826
3827         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3828                 .nb_max = I40E_MAX_RING_DESC,
3829                 .nb_min = I40E_MIN_RING_DESC,
3830                 .nb_align = I40E_ALIGN_RING_DESC,
3831                 .nb_seg_max = I40E_TX_MAX_SEG,
3832                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3833         };
3834
3835         if (pf->flags & I40E_FLAG_VMDQ) {
3836                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3837                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3838                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3839                                                 pf->max_nb_vmdq_vsi;
3840                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3841                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3842                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3843         }
3844
3845         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3846                 /* For XL710 */
3847                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3848                 dev_info->default_rxportconf.nb_queues = 2;
3849                 dev_info->default_txportconf.nb_queues = 2;
3850                 if (dev->data->nb_rx_queues == 1)
3851                         dev_info->default_rxportconf.ring_size = 2048;
3852                 else
3853                         dev_info->default_rxportconf.ring_size = 1024;
3854                 if (dev->data->nb_tx_queues == 1)
3855                         dev_info->default_txportconf.ring_size = 1024;
3856                 else
3857                         dev_info->default_txportconf.ring_size = 512;
3858
3859         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3860                 /* For XXV710 */
3861                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3862                 dev_info->default_rxportconf.nb_queues = 1;
3863                 dev_info->default_txportconf.nb_queues = 1;
3864                 dev_info->default_rxportconf.ring_size = 256;
3865                 dev_info->default_txportconf.ring_size = 256;
3866         } else {
3867                 /* For X710 */
3868                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3869                 dev_info->default_rxportconf.nb_queues = 1;
3870                 dev_info->default_txportconf.nb_queues = 1;
3871                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3872                         dev_info->default_rxportconf.ring_size = 512;
3873                         dev_info->default_txportconf.ring_size = 256;
3874                 } else {
3875                         dev_info->default_rxportconf.ring_size = 256;
3876                         dev_info->default_txportconf.ring_size = 256;
3877                 }
3878         }
3879         dev_info->default_rxportconf.burst_size = 32;
3880         dev_info->default_txportconf.burst_size = 32;
3881
3882         return 0;
3883 }
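/*
 * Illustrative sketch (not driver code): applications retrieve these values
 * through rte_eth_dev_info_get() and can size their rings from the reported
 * per-speed defaults; port_id is assumed to be a valid port.
 *
 *     struct rte_eth_dev_info info;
 *     if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *             uint16_t nb_rxd = info.default_rxportconf.ring_size;
 *             uint16_t nb_txd = info.default_txportconf.ring_size;
 *             ... pass nb_rxd/nb_txd to rte_eth_rx/tx_queue_setup() ...
 *     }
 */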
3884
3885 static int
3886 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3887 {
3888         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3889         struct i40e_vsi *vsi = pf->main_vsi;
3890         PMD_INIT_FUNC_TRACE();
3891
3892         if (on)
3893                 return i40e_vsi_add_vlan(vsi, vlan_id);
3894         else
3895                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3896 }
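/*
 * Illustrative sketch: this callback is reached through
 * rte_eth_dev_vlan_filter(). DEV_RX_OFFLOAD_VLAN_FILTER must be enabled in
 * the port's Rx offloads for the filter to take effect; port_id and the
 * VLAN ID below are examples.
 *
 *     rte_eth_dev_vlan_filter(port_id, 100, 1);   // accept VLAN ID 100
 *     rte_eth_dev_vlan_filter(port_id, 100, 0);   // remove it again
 */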
3897
3898 static int
3899 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3900                                 enum rte_vlan_type vlan_type,
3901                                 uint16_t tpid, int qinq)
3902 {
3903         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3904         uint64_t reg_r = 0;
3905         uint64_t reg_w = 0;
3906         uint16_t reg_id = 3;
3907         int ret;
3908
3909         if (qinq) {
3910                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3911                         reg_id = 2;
3912         }
3913
3914         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3915                                           &reg_r, NULL);
3916         if (ret != I40E_SUCCESS) {
3917                 PMD_DRV_LOG(ERR,
3918                            "Failed to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3919                            reg_id);
3920                 return -EIO;
3921         }
3922         PMD_DRV_LOG(DEBUG,
3923                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3924                     reg_id, reg_r);
3925
3926         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3927         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3928         if (reg_r == reg_w) {
3929                 PMD_DRV_LOG(DEBUG, "No need to write");
3930                 return 0;
3931         }
3932
3933         ret = i40e_aq_debug_write_global_register(hw,
3934                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3935                                            reg_w, NULL);
3936         if (ret != I40E_SUCCESS) {
3937                 PMD_DRV_LOG(ERR,
3938                             "Failed to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3939                             reg_id);
3940                 return -EIO;
3941         }
3942         PMD_DRV_LOG(DEBUG,
3943                     "Global register 0x%08x updated with value 0x%08x",
3944                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3945
3946         return 0;
3947 }
3948
3949 static int
3950 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3951                    enum rte_vlan_type vlan_type,
3952                    uint16_t tpid)
3953 {
3954         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3955         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3956         int qinq = dev->data->dev_conf.rxmode.offloads &
3957                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3958         int ret = 0;
3959
3960         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3961              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3962             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3963                 PMD_DRV_LOG(ERR,
3964                             "Unsupported VLAN type.");
3965                 return -EINVAL;
3966         }
3967
3968         if (pf->support_multi_driver) {
3969                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3970                 return -ENOTSUP;
3971         }
3972
3973         /* Support for 802.1ad frames was added in NVM API 1.7 */
3974         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3975                 if (qinq) {
3976                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3977                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3978                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3979                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3980                 } else {
3981                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3982                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3983                 }
3984                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3985                 if (ret != I40E_SUCCESS) {
3986                         PMD_DRV_LOG(ERR,
3987                                     "Set switch config failed aq_err: %d",
3988                                     hw->aq.asq_last_status);
3989                         ret = -EIO;
3990                 }
3991         } else
3992                 /* If NVM API < 1.7, keep the register setting */
3993                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3994                                                       tpid, qinq);
3995
3996         return ret;
3997 }
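/*
 * Illustrative sketch: the ethdev entry point for this callback is
 * rte_eth_dev_set_vlan_ether_type(). For QinQ (VLAN extend enabled), a
 * typical setup uses 0x88A8 for the outer tag and 0x8100 for the inner tag;
 * port_id is assumed valid.
 *
 *     rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER, 0x88A8);
 *     rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_INNER, 0x8100);
 */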
3998
3999 /* Configure outer vlan stripping on or off in QinQ mode */
4000 static int
4001 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
4002 {
4003         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4004         int ret = I40E_SUCCESS;
4005         uint32_t reg;
4006
4007         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
4008                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
4009                 return -EINVAL;
4010         }
4011
4012         /* Configure for outer VLAN RX stripping */
4013         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4014
4015         if (on)
4016                 reg |= I40E_VSI_TSR_QINQ_STRIP;
4017         else
4018                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4019
4020         ret = i40e_aq_debug_write_register(hw,
4021                                                    I40E_VSI_TSR(vsi->vsi_id),
4022                                                    reg, NULL);
4023         if (ret < 0) {
4024                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4025                                     vsi->vsi_id);
4026                 return I40E_ERR_CONFIG;
4027         }
4028
4029         return ret;
4030 }
4031
4032 static int
4033 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4034 {
4035         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4036         struct i40e_vsi *vsi = pf->main_vsi;
4037         struct rte_eth_rxmode *rxmode;
4038
4039         rxmode = &dev->data->dev_conf.rxmode;
4040         if (mask & ETH_VLAN_FILTER_MASK) {
4041                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4042                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4043                 else
4044                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4045         }
4046
4047         if (mask & ETH_VLAN_STRIP_MASK) {
4048                 /* Enable or disable VLAN stripping */
4049                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4050                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4051                 else
4052                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4053         }
4054
4055         if (mask & ETH_VLAN_EXTEND_MASK) {
4056                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4057                         i40e_vsi_config_double_vlan(vsi, TRUE);
4058                         /* Set global registers with default ethertype. */
4059                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4060                                            RTE_ETHER_TYPE_VLAN);
4061                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4062                                            RTE_ETHER_TYPE_VLAN);
4063                 } else {
4064                         i40e_vsi_config_double_vlan(vsi, FALSE);
4065                 }
4066         }
4067
4068         if (mask & ETH_QINQ_STRIP_MASK) {
4069                 /* Enable or disable outer VLAN stripping */
4070                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4071                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4072                 else
4073                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4074         }
4075
4076         return 0;
4077 }
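/*
 * Illustrative sketch: applications toggle these bits through
 * rte_eth_dev_set_vlan_offload(), which computes the changed mask and then
 * calls this function; port_id is assumed valid.
 *
 *     int flags = rte_eth_dev_get_vlan_offload(port_id);
 *     flags |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *     rte_eth_dev_set_vlan_offload(port_id, flags);
 */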
4078
4079 static void
4080 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4081                           __rte_unused uint16_t queue,
4082                           __rte_unused int on)
4083 {
4084         PMD_INIT_FUNC_TRACE();
4085 }
4086
4087 static int
4088 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4089 {
4090         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4091         struct i40e_vsi *vsi = pf->main_vsi;
4092         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4093         struct i40e_vsi_vlan_pvid_info info;
4094
4095         memset(&info, 0, sizeof(info));
4096         info.on = on;
4097         if (info.on)
4098                 info.config.pvid = pvid;
4099         else {
4100                 info.config.reject.tagged =
4101                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4102                 info.config.reject.untagged =
4103                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4104         }
4105
4106         return i40e_vsi_vlan_pvid_set(vsi, &info);
4107 }
4108
4109 static int
4110 i40e_dev_led_on(struct rte_eth_dev *dev)
4111 {
4112         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4113         uint32_t mode = i40e_led_get(hw);
4114
4115         if (mode == 0)
4116                 i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
4117
4118         return 0;
4119 }
4120
4121 static int
4122 i40e_dev_led_off(struct rte_eth_dev *dev)
4123 {
4124         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4125         uint32_t mode = i40e_led_get(hw);
4126
4127         if (mode != 0)
4128                 i40e_led_set(hw, 0, false);
4129
4130         return 0;
4131 }
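/*
 * Illustrative sketch: port identification from an application, assuming a
 * valid port_id.
 *
 *     rte_eth_led_on(port_id);    // light the port LED to identify it
 *     rte_eth_led_off(port_id);
 */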
4132
4133 static int
4134 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4135 {
4136         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4137         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4138
4139         fc_conf->pause_time = pf->fc_conf.pause_time;
4140
4141         /* Read from the registers, in case they were modified by another port */
4142         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4143                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4144         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4145                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4146
4147         fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4148         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4149
4150         /* Return the current mode according to the actual setting */
4151         switch (hw->fc.current_mode) {
4152         case I40E_FC_FULL:
4153                 fc_conf->mode = RTE_FC_FULL;
4154                 break;
4155         case I40E_FC_TX_PAUSE:
4156                 fc_conf->mode = RTE_FC_TX_PAUSE;
4157                 break;
4158         case I40E_FC_RX_PAUSE:
4159                 fc_conf->mode = RTE_FC_RX_PAUSE;
4160                 break;
4161         case I40E_FC_NONE:
4162         default:
4163                 fc_conf->mode = RTE_FC_NONE;
4164         }
4165
4166         return 0;
4167 }
4168
4169 static int
4170 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4171 {
4172         uint32_t mflcn_reg, fctrl_reg, reg;
4173         uint32_t max_high_water;
4174         uint8_t i, aq_failure;
4175         int err;
4176         struct i40e_hw *hw;
4177         struct i40e_pf *pf;
4178         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4179                 [RTE_FC_NONE] = I40E_FC_NONE,
4180                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4181                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4182                 [RTE_FC_FULL] = I40E_FC_FULL
4183         };
4184
4185         /* The high_water field in rte_eth_fc_conf is expressed in kilobytes */
4186
4187         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4188         if ((fc_conf->high_water > max_high_water) ||
4189                         (fc_conf->high_water < fc_conf->low_water)) {
4190                 PMD_INIT_LOG(ERR,
4191                         "Invalid high/low water mark in KB: high_water must be >= low_water and <= %d.",
4192                         max_high_water);
4193                 return -EINVAL;
4194         }
4195
4196         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4197         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4198         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4199
4200         pf->fc_conf.pause_time = fc_conf->pause_time;
4201         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4202         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4203
4204         PMD_INIT_FUNC_TRACE();
4205
4206         /* All the link flow control related enable/disable register
4207          * configuration is handled by the F/W
4208          */
4209         err = i40e_set_fc(hw, &aq_failure, true);
4210         if (err < 0)
4211                 return -ENOSYS;
4212
4213         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4214                 /* Configure flow control refresh threshold,
4215                  * the value for stat_tx_pause_refresh_timer[8]
4216                  * is used for global pause operation.
4217                  */
4218
4219                 I40E_WRITE_REG(hw,
4220                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4221                                pf->fc_conf.pause_time);
4222
4223                 /* configure the timer value included in transmitted pause
4224                  * frame,
4225                  * the value for stat_tx_pause_quanta[8] is used for global
4226                  * pause operation
4227                  */
4228                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4229                                pf->fc_conf.pause_time);
4230
4231                 fctrl_reg = I40E_READ_REG(hw,
4232                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4233
4234                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4235                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4236                 else
4237                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4238
4239                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4240                                fctrl_reg);
4241         } else {
4242                 /* Configure pause time (2 TCs per register) */
4243                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4244                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4245                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4246
4247                 /* Configure flow control refresh threshold value */
4248                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4249                                pf->fc_conf.pause_time / 2);
4250
4251                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4252
4253                 /* Set or clear the MFLCN.PMCF and MFLCN.DPF bits
4254                  * depending on the configuration
4255                  */
4256                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4257                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4258                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4259                 } else {
4260                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4261                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4262                 }
4263
4264                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4265         }
4266
4267         if (!pf->support_multi_driver) {
4268                 /* Configure watermarks based on both packet count and byte count */
4269                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4270                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4271                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4272                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4273                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4274                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4275                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4276                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4277                                   << I40E_KILOSHIFT);
4278                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4279                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4280                                    << I40E_KILOSHIFT);
4281         } else {
4282                 PMD_DRV_LOG(ERR,
4283                             "Watermark configuration is not supported.");
4284         }
4285
4286         I40E_WRITE_FLUSH(hw);
4287
4288         return 0;
4289 }
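/*
 * Illustrative sketch: configuring link flow control from an application,
 * with the water marks in kilobytes as noted above. The values below are
 * examples, not recommendations; port_id is assumed valid.
 *
 *     struct rte_eth_fc_conf fc;
 *     rte_eth_dev_flow_ctrl_get(port_id, &fc);
 *     fc.mode = RTE_FC_FULL;
 *     fc.high_water = 0x1E0;   // KB, must stay <= Rx packet buffer size
 *     fc.low_water = 0x1C0;    // KB, must stay <= high_water
 *     fc.pause_time = 0xFFFF;
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */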
4290
4291 static int
4292 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4293                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4294 {
4295         PMD_INIT_FUNC_TRACE();
4296
4297         return -ENOSYS;
4298 }
4299
4300 /* Add a MAC address, and update filters */
4301 static int
4302 i40e_macaddr_add(struct rte_eth_dev *dev,
4303                  struct rte_ether_addr *mac_addr,
4304                  __rte_unused uint32_t index,
4305                  uint32_t pool)
4306 {
4307         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4308         struct i40e_mac_filter_info mac_filter;
4309         struct i40e_vsi *vsi;
4310         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4311         int ret;
4312
4313         /* If VMDQ not enabled or configured, return */
4314         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4315                           !pf->nb_cfg_vmdq_vsi)) {
4316                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4317                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4318                         pool);
4319                 return -ENOTSUP;
4320         }
4321
4322         if (pool > pf->nb_cfg_vmdq_vsi) {
4323                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4324                                 pool, pf->nb_cfg_vmdq_vsi);
4325                 return -EINVAL;
4326         }
4327
4328         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4329         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4330                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4331         else
4332                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
4333
4334         if (pool == 0)
4335                 vsi = pf->main_vsi;
4336         else
4337                 vsi = pf->vmdq[pool - 1].vsi;
4338
4339         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4340         if (ret != I40E_SUCCESS) {
4341                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4342                 return -ENODEV;
4343         }
4344         return 0;
4345 }
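/*
 * Illustrative sketch: adding a secondary MAC address to the main VSI
 * (pool 0) from an application; the address below is a made-up,
 * locally-administered example.
 *
 *     struct rte_ether_addr addr = {{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};
 *     rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */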
4346
4347 /* Remove a MAC address, and update filters */
4348 static void
4349 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4350 {
4351         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4352         struct i40e_vsi *vsi;
4353         struct rte_eth_dev_data *data = dev->data;
4354         struct rte_ether_addr *macaddr;
4355         int ret;
4356         uint32_t i;
4357         uint64_t pool_sel;
4358
4359         macaddr = &(data->mac_addrs[index]);
4360
4361         pool_sel = dev->data->mac_pool_sel[index];
4362
4363         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4364                 if (pool_sel & (1ULL << i)) {
4365                         if (i == 0)
4366                                 vsi = pf->main_vsi;
4367                         else {
4368                                 /* No VMDQ pool enabled or configured */
4369                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4370                                         (i > pf->nb_cfg_vmdq_vsi)) {
4371                                         PMD_DRV_LOG(ERR,
4372                                                 "No VMDQ pool enabled/configured");
4373                                         return;
4374                                 }
4375                                 vsi = pf->vmdq[i - 1].vsi;
4376                         }
4377                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4378
4379                         if (ret) {
4380                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4381                                 return;
4382                         }
4383                 }
4384         }
4385 }
4386
4387 /* Set perfect match or hash match of MAC and VLAN for a VF */
4388 static int
4389 i40e_vf_mac_filter_set(struct i40e_pf *pf,
4390                  struct rte_eth_mac_filter *filter,
4391                  bool add)
4392 {
4393         struct i40e_hw *hw;
4394         struct i40e_mac_filter_info mac_filter;
4395         struct rte_ether_addr old_mac;
4396         struct rte_ether_addr *new_mac;
4397         struct i40e_pf_vf *vf = NULL;
4398         uint16_t vf_id;
4399         int ret;
4400
4401         if (pf == NULL) {
4402                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
4403                 return -EINVAL;
4404         }
4405         hw = I40E_PF_TO_HW(pf);
4406
4407         if (filter == NULL) {
4408                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
4409                 return -EINVAL;
4410         }
4411
4412         new_mac = &filter->mac_addr;
4413
4414         if (rte_is_zero_ether_addr(new_mac)) {
4415                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
4416                 return -EINVAL;
4417         }
4418
4419         vf_id = filter->dst_id;
4420
4421         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
4422                 PMD_DRV_LOG(ERR, "Invalid argument.");
4423                 return -EINVAL;
4424         }
4425         vf = &pf->vfs[vf_id];
4426
4427         if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) {
4428                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
4429                 return -EINVAL;
4430         }
4431
4432         if (add) {
4433                 rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
4434                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
4435                                 RTE_ETHER_ADDR_LEN);
4436                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
4437                                  RTE_ETHER_ADDR_LEN);
4438
4439                 mac_filter.filter_type = filter->filter_type;
4440                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
4441                 if (ret != I40E_SUCCESS) {
4442                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
4443                         return -1;
4444                 }
4445                 rte_ether_addr_copy(new_mac, &pf->dev_addr);
4446         } else {
4447                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
4448                                 RTE_ETHER_ADDR_LEN);
4449                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
4450                 if (ret != I40E_SUCCESS) {
4451                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
4452                         return -1;
4453                 }
4454
4455                 /* Clear device address as it has been removed */
4456                 if (rte_is_same_ether_addr(&pf->dev_addr, new_mac))
4457                         memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr));
4458         }
4459
4460         return 0;
4461 }
4462
4463 /* MAC filter handle */
4464 static int
4465 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4466                 void *arg)
4467 {
4468         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4469         struct rte_eth_mac_filter *filter;
4470         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4471         int ret = I40E_NOT_SUPPORTED;
4472
4473         filter = (struct rte_eth_mac_filter *)(arg);
4474
4475         switch (filter_op) {
4476         case RTE_ETH_FILTER_NOP:
4477                 ret = I40E_SUCCESS;
4478                 break;
4479         case RTE_ETH_FILTER_ADD:
4480                 i40e_pf_disable_irq0(hw);
4481                 if (filter->is_vf)
4482                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
4483                 i40e_pf_enable_irq0(hw);
4484                 break;
4485         case RTE_ETH_FILTER_DELETE:
4486                 i40e_pf_disable_irq0(hw);
4487                 if (filter->is_vf)
4488                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
4489                 i40e_pf_enable_irq0(hw);
4490                 break;
4491         default:
4492                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4493                 ret = I40E_ERR_PARAM;
4494                 break;
4495         }
4496
4497         return ret;
4498 }
4499
4500 static int
4501 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4502 {
4503         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4504         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4505         uint32_t reg;
4506         int ret;
4507
4508         if (!lut)
4509                 return -EINVAL;
4510
4511         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4512                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4513                                           vsi->type != I40E_VSI_SRIOV,
4514                                           lut, lut_size);
4515                 if (ret) {
4516                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4517                         return ret;
4518                 }
4519         } else {
4520                 uint32_t *lut_dw = (uint32_t *)lut;
4521                 uint16_t i, lut_size_dw = lut_size / 4;
4522
4523                 if (vsi->type == I40E_VSI_SRIOV) {
4524                         for (i = 0; i < lut_size_dw; i++) {
4525                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4526                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4527                         }
4528                 } else {
4529                         for (i = 0; i < lut_size_dw; i++)
4530                                 lut_dw[i] = I40E_READ_REG(hw,
4531                                                           I40E_PFQF_HLUT(i));
4532                 }
4533         }
4534
4535         return 0;
4536 }
4537
4538 int
4539 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4540 {
4541         struct i40e_pf *pf;
4542         struct i40e_hw *hw;
4543         int ret;
4544
4545         if (!vsi || !lut)
4546                 return -EINVAL;
4547
4548         pf = I40E_VSI_TO_PF(vsi);
4549         hw = I40E_VSI_TO_HW(vsi);
4550
4551         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4552                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4553                                           vsi->type != I40E_VSI_SRIOV,
4554                                           lut, lut_size);
4555                 if (ret) {
4556                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4557                         return ret;
4558                 }
4559         } else {
4560                 uint32_t *lut_dw = (uint32_t *)lut;
4561                 uint16_t i, lut_size_dw = lut_size / 4;
4562
4563                 if (vsi->type == I40E_VSI_SRIOV) {
4564                         for (i = 0; i < lut_size_dw; i++)
4565                                 I40E_WRITE_REG(
4566                                         hw,
4567                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4568                                         lut_dw[i]);
4569                 } else {
4570                         for (i = 0; i < lut_size_dw; i++)
4571                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4572                                                lut_dw[i]);
4573                 }
4574                 I40E_WRITE_FLUSH(hw);
4575         }
4576
4577         return 0;
4578 }
4579
4580 static int
4581 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4582                          struct rte_eth_rss_reta_entry64 *reta_conf,
4583                          uint16_t reta_size)
4584 {
4585         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4586         uint16_t i, lut_size = pf->hash_lut_size;
4587         uint16_t idx, shift;
4588         uint8_t *lut;
4589         int ret;
4590
4591         if (reta_size != lut_size ||
4592                 reta_size > ETH_RSS_RETA_SIZE_512) {
4593                 PMD_DRV_LOG(ERR,
4594                         "The configured hash lookup table size (%d) doesn't match the size the hardware supports (%d)",
4595                         reta_size, lut_size);
4596                 return -EINVAL;
4597         }
4598
4599         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4600         if (!lut) {
4601                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4602                 return -ENOMEM;
4603         }
4604         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4605         if (ret)
4606                 goto out;
4607         for (i = 0; i < reta_size; i++) {
4608                 idx = i / RTE_RETA_GROUP_SIZE;
4609                 shift = i % RTE_RETA_GROUP_SIZE;
4610                 if (reta_conf[idx].mask & (1ULL << shift))
4611                         lut[i] = reta_conf[idx].reta[shift];
4612         }
4613         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4614
4615         pf->adapter->rss_reta_updated = 1;
4616
4617 out:
4618         rte_free(lut);
4619
4620         return ret;
4621 }
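/*
 * Illustrative sketch: spreading the redirection table evenly across nb_q
 * Rx queues from an application. reta_size must equal the value reported
 * in rte_eth_dev_info (e.g. 512 on a PF); the names below are local
 * examples.
 *
 *     struct rte_eth_rss_reta_entry64 reta[512 / RTE_RETA_GROUP_SIZE];
 *     uint16_t j;
 *     memset(reta, 0, sizeof(reta));
 *     for (j = 0; j < 512; j++) {
 *             reta[j / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (j % RTE_RETA_GROUP_SIZE);
 *             reta[j / RTE_RETA_GROUP_SIZE].reta[j % RTE_RETA_GROUP_SIZE] =
 *                     j % nb_q;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta, 512);
 */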
4622
4623 static int
4624 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4625                         struct rte_eth_rss_reta_entry64 *reta_conf,
4626                         uint16_t reta_size)
4627 {
4628         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4629         uint16_t i, lut_size = pf->hash_lut_size;
4630         uint16_t idx, shift;
4631         uint8_t *lut;
4632         int ret;
4633
4634         if (reta_size != lut_size ||
4635                 reta_size > ETH_RSS_RETA_SIZE_512) {
4636                 PMD_DRV_LOG(ERR,
4637                         "The configured hash lookup table size (%d) doesn't match the size the hardware supports (%d)",
4638                         reta_size, lut_size);
4639                 return -EINVAL;
4640         }
4641
4642         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4643         if (!lut) {
4644                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4645                 return -ENOMEM;
4646         }
4647
4648         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4649         if (ret)
4650                 goto out;
4651         for (i = 0; i < reta_size; i++) {
4652                 idx = i / RTE_RETA_GROUP_SIZE;
4653                 shift = i % RTE_RETA_GROUP_SIZE;
4654                 if (reta_conf[idx].mask & (1ULL << shift))
4655                         reta_conf[idx].reta[shift] = lut[i];
4656         }
4657
4658 out:
4659         rte_free(lut);
4660
4661         return ret;
4662 }
4663
4664 /**
4665  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4666  * @hw:   pointer to the HW structure
4667  * @mem:  pointer to mem struct to fill out
4668  * @size: size of memory requested
4669  * @alignment: what to align the allocation to
4670  **/
4671 enum i40e_status_code
4672 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4673                         struct i40e_dma_mem *mem,
4674                         u64 size,
4675                         u32 alignment)
4676 {
4677         const struct rte_memzone *mz = NULL;
4678         char z_name[RTE_MEMZONE_NAMESIZE];
4679
4680         if (!mem)
4681                 return I40E_ERR_PARAM;
4682
4683         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4684         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4685                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4686         if (!mz)
4687                 return I40E_ERR_NO_MEMORY;
4688
4689         mem->size = size;
4690         mem->va = mz->addr;
4691         mem->pa = mz->iova;
4692         mem->zone = (const void *)mz;
4693         PMD_DRV_LOG(DEBUG,
4694                 "memzone %s allocated with physical address: %"PRIu64,
4695                 mz->name, mem->pa);
4696
4697         return I40E_SUCCESS;
4698 }
4699
4700 /**
4701  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4702  * @hw:   pointer to the HW structure
4703  * @mem:  ptr to mem struct to free
4704  **/
4705 enum i40e_status_code
4706 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4707                     struct i40e_dma_mem *mem)
4708 {
4709         if (!mem)
4710                 return I40E_ERR_PARAM;
4711
4712         PMD_DRV_LOG(DEBUG,
4713                 "memzone %s to be freed with physical address: %"PRIu64,
4714                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4715         rte_memzone_free((const struct rte_memzone *)mem->zone);
4716         mem->zone = NULL;
4717         mem->va = NULL;
4718         mem->pa = (u64)0;
4719
4720         return I40E_SUCCESS;
4721 }
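/*
 * Illustrative pairing (sketch): the shared/base code reaches these through
 * its OS-abstraction wrappers. The size and alignment below are made-up
 * examples.
 *
 *     struct i40e_dma_mem mem;
 *     if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096) == I40E_SUCCESS) {
 *             ... use mem.va (virtual address) and mem.pa (IOVA) ...
 *             i40e_free_dma_mem_d(hw, &mem);
 *     }
 */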
4722
4723 /**
4724  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4725  * @hw:   pointer to the HW structure
4726  * @mem:  pointer to mem struct to fill out
4727  * @size: size of memory requested
4728  **/
4729 enum i40e_status_code
4730 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4731                          struct i40e_virt_mem *mem,
4732                          u32 size)
4733 {
4734         if (!mem)
4735                 return I40E_ERR_PARAM;
4736
4737         mem->size = size;
4738         mem->va = rte_zmalloc("i40e", size, 0);
4739
4740         if (mem->va)
4741                 return I40E_SUCCESS;
4742         else
4743                 return I40E_ERR_NO_MEMORY;
4744 }
4745
4746 /**
4747  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4748  * @hw:   pointer to the HW structure
4749  * @mem:  pointer to mem struct to free
4750  **/
4751 enum i40e_status_code
4752 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4753                      struct i40e_virt_mem *mem)
4754 {
4755         if (!mem)
4756                 return I40E_ERR_PARAM;
4757
4758         rte_free(mem->va);
4759         mem->va = NULL;
4760
4761         return I40E_SUCCESS;
4762 }
4763
4764 void
4765 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4766 {
4767         rte_spinlock_init(&sp->spinlock);
4768 }
4769
4770 void
4771 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4772 {
4773         rte_spinlock_lock(&sp->spinlock);
4774 }
4775
4776 void
4777 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4778 {
4779         rte_spinlock_unlock(&sp->spinlock);
4780 }
4781
4782 void
4783 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4784 {
4785         return;
4786 }
4787
4788 /**
4789  * Get the hardware capabilities, which will be parsed
4790  * and saved into struct i40e_hw.
4791  */
4792 static int
4793 i40e_get_cap(struct i40e_hw *hw)
4794 {
4795         struct i40e_aqc_list_capabilities_element_resp *buf;
4796         uint16_t len, size = 0;
4797         int ret;
4798
4799         /* Allocate a buffer large enough to hold the response data temporarily */
4800         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4801                                                 I40E_MAX_CAP_ELE_NUM;
4802         buf = rte_zmalloc("i40e", len, 0);
4803         if (!buf) {
4804                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4805                 return I40E_ERR_NO_MEMORY;
4806         }
4807
4808         /* Get and parse the capabilities, then save them to hw */
4809         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4810                         i40e_aqc_opc_list_func_capabilities, NULL);
4811         if (ret != I40E_SUCCESS)
4812                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4813
4814         /* Free the temporary buffer after being used */
4815         rte_free(buf);
4816
4817         return ret;
4818 }
4819
4820 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4821
4822 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4823                 const char *value,
4824                 void *opaque)
4825 {
4826         struct i40e_pf *pf;
4827         unsigned long num;
4828         char *end;
4829
4830         pf = (struct i40e_pf *)opaque;
4831         RTE_SET_USED(key);
4832
4833         errno = 0;
4834         num = strtoul(value, &end, 0);
4835         if (errno != 0 || end == value || *end != 0) {
4836                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4837                             "the current value = %hu", value, pf->vf_nb_qp_max);
4838                 return -(EINVAL);
4839         }
4840
4841         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4842                 pf->vf_nb_qp_max = (uint16_t)num;
4843         else
4844                 /* Return 0 so a later valid instance of the same argument still works */
4845                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4846                             "a power of 2 and no greater than 16. Keeping "
4847                             "the current value = %hu", num, pf->vf_nb_qp_max);
4848
4849         return 0;
4850 }
4851
4852 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4853 {
4854         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4855         struct rte_kvargs *kvlist;
4856         int kvargs_count;
4857
4858         /* Set the default queue number per VF to 4 */
4859         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4860
4861         if (dev->device->devargs == NULL)
4862                 return 0;
4863
4864         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4865         if (kvlist == NULL)
4866                 return -(EINVAL);
4867
4868         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4869         if (!kvargs_count) {
4870                 rte_kvargs_free(kvlist);
4871                 return 0;
4872         }
4873
4874         if (kvargs_count > 1)
4875                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
4876                             "the first invalid or the last valid one is used!",
4877                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4878
4879         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4880                            i40e_pf_parse_vf_queue_number_handler, pf);
4881
4882         rte_kvargs_free(kvlist);
4883
4884         return 0;
4885 }
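/*
 * Illustrative usage (sketch): the per-VF queue count is passed as a device
 * argument at EAL startup; the PCI address below is a made-up example.
 *
 *     testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i
 *
 * Values must be a power of 2 and at most 16; anything else keeps the
 * default of 4.
 */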
4886
4887 static int
4888 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4889 {
4890         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4891         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4892         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4893         uint16_t qp_count = 0, vsi_count = 0;
4894
4895         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4896                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4897                 return -EINVAL;
4898         }
4899
4900         i40e_pf_config_vf_rxq_number(dev);
4901
4902         /* Add the parameter init for LFC */
4903         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4904         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4905         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4906
4907         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4908         pf->max_num_vsi = hw->func_caps.num_vsis;
4909         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4910         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4911
4912         /* FDir queue/VSI allocation */
4913         pf->fdir_qp_offset = 0;
4914         if (hw->func_caps.fd) {
4915                 pf->flags |= I40E_FLAG_FDIR;
4916                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4917         } else {
4918                 pf->fdir_nb_qps = 0;
4919         }
4920         qp_count += pf->fdir_nb_qps;
4921         vsi_count += 1;
4922
4923         /* LAN queue/VSI allocation */
4924         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4925         if (!hw->func_caps.rss) {
4926                 pf->lan_nb_qps = 1;
4927         } else {
4928                 pf->flags |= I40E_FLAG_RSS;
4929                 if (hw->mac.type == I40E_MAC_X722)
4930                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4931                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4932         }
4933         qp_count += pf->lan_nb_qps;
4934         vsi_count += 1;
4935
4936         /* VF queue/VSI allocation */
4937         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4938         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4939                 pf->flags |= I40E_FLAG_SRIOV;
4940                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4941                 pf->vf_num = pci_dev->max_vfs;
4942                 PMD_DRV_LOG(DEBUG,
4943                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4944                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4945         } else {
4946                 pf->vf_nb_qps = 0;
4947                 pf->vf_num = 0;
4948         }
4949         qp_count += pf->vf_nb_qps * pf->vf_num;
4950         vsi_count += pf->vf_num;
4951
4952         /* VMDq queue/VSI allocation */
4953         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4954         pf->vmdq_nb_qps = 0;
4955         pf->max_nb_vmdq_vsi = 0;
4956         if (hw->func_caps.vmdq) {
4957                 if (qp_count < hw->func_caps.num_tx_qp &&
4958                         vsi_count < hw->func_caps.num_vsis) {
4959                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4960                                 qp_count) / pf->vmdq_nb_qp_max;
4961
4962                         /* Limit the number of VMDq VSIs to the maximum
4963                          * that ethdev can support
4964                          */
4965                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4966                                 hw->func_caps.num_vsis - vsi_count);
4967                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4968                                 ETH_64_POOLS);
4969                         if (pf->max_nb_vmdq_vsi) {
4970                                 pf->flags |= I40E_FLAG_VMDQ;
4971                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4972                                 PMD_DRV_LOG(DEBUG,
4973                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4974                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4975                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4976                         } else {
4977                                 PMD_DRV_LOG(INFO,
4978                                         "Not enough queues left for VMDq");
4979                         }
4980                 } else {
4981                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4982                 }
4983         }
4984         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4985         vsi_count += pf->max_nb_vmdq_vsi;
4986
4987         if (hw->func_caps.dcb)
4988                 pf->flags |= I40E_FLAG_DCB;
4989
4990         if (qp_count > hw->func_caps.num_tx_qp) {
4991                 PMD_DRV_LOG(ERR,
4992                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4993                         qp_count, hw->func_caps.num_tx_qp);
4994                 return -EINVAL;
4995         }
4996         if (vsi_count > hw->func_caps.num_vsis) {
4997                 PMD_DRV_LOG(ERR,
4998                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4999                         vsi_count, hw->func_caps.num_vsis);
5000                 return -EINVAL;
5001         }
5002
5003         return 0;
5004 }
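/*
 * Worked example (hypothetical numbers): with num_tx_qp = 384, FDIR
 * enabled (1 queue), lan_nb_qp_max = 64 and 4 VFs at 4 queues each, the
 * layout computed above is:
 *
 *     fdir_qp_offset = 0,  fdir_nb_qps = 1
 *     lan_qp_offset  = 1,  lan_nb_qps  = 64
 *     vf_qp_offset   = 65, VF queues   = 4 * 4 = 16
 *     vmdq_qp_offset = 81, up to (384 - 81) / vmdq_nb_qp_max VMDq VSIs
 *
 * giving qp_count = 81 plus the VMDq queues, which is checked against the
 * hardware maxima at the end of the function.
 */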
5005
5006 static int
5007 i40e_pf_get_switch_config(struct i40e_pf *pf)
5008 {
5009         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5010         struct i40e_aqc_get_switch_config_resp *switch_config;
5011         struct i40e_aqc_switch_config_element_resp *element;
5012         uint16_t start_seid = 0, num_reported;
5013         int ret;
5014
5015         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
5016                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
5017         if (!switch_config) {
5018                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5019                 return -ENOMEM;
5020         }
5021
5022         /* Get the switch configurations */
5023         ret = i40e_aq_get_switch_config(hw, switch_config,
5024                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
5025         if (ret != I40E_SUCCESS) {
5026                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
5027                 goto fail;
5028         }
5029         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
5030         if (num_reported != 1) { /* The number should be 1 */
5031                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
5032                 goto fail;
5033         }
5034
5035         /* Parse the switch configuration elements */
5036         element = &(switch_config->element[0]);
5037         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
5038                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
5039                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
5040         } else
5041                 PMD_DRV_LOG(INFO, "Unknown element type");
5042
5043 fail:
5044         rte_free(switch_config);
5045
5046         return ret;
5047 }
5048
5049 static int
5050 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
5051                         uint32_t num)
5052 {
5053         struct pool_entry *entry;
5054
5055         if (pool == NULL || num == 0)
5056                 return -EINVAL;
5057
5058         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
5059         if (entry == NULL) {
5060                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
5061                 return -ENOMEM;
5062         }
5063
5064         /* Initialize the queue heap */
5065         pool->num_free = num;
5066         pool->num_alloc = 0;
5067         pool->base = base;
5068         LIST_INIT(&pool->alloc_list);
5069         LIST_INIT(&pool->free_list);
5070
5071         /* Initialize the element */
5072         entry->base = 0;
5073         entry->len = num;
5074
5075         LIST_INSERT_HEAD(&pool->free_list, entry, next);
5076         return 0;
5077 }
5078
5079 static void
5080 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
5081 {
5082         struct pool_entry *entry, *next_entry;
5083
5084         if (pool == NULL)
5085                 return;
5086
5087         for (entry = LIST_FIRST(&pool->alloc_list);
5088                         entry && (next_entry = LIST_NEXT(entry, next), 1);
5089                         entry = next_entry) {
5090                 LIST_REMOVE(entry, next);
5091                 rte_free(entry);
5092         }
5093
5094         for (entry = LIST_FIRST(&pool->free_list);
5095                         entry && (next_entry = LIST_NEXT(entry, next), 1);
5096                         entry = next_entry) {
5097                 LIST_REMOVE(entry, next);
5098                 rte_free(entry);
5099         }
5100
5101         pool->num_free = 0;
5102         pool->num_alloc = 0;
5103         pool->base = 0;
5104         LIST_INIT(&pool->alloc_list);
5105         LIST_INIT(&pool->free_list);
5106 }
5107
5108 static int
5109 i40e_res_pool_free(struct i40e_res_pool_info *pool,
5110                        uint32_t base)
5111 {
5112         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
5113         uint32_t pool_offset;
5114         uint16_t len;
5115         int insert;
5116
5117         if (pool == NULL) {
5118                 PMD_DRV_LOG(ERR, "Invalid parameter");
5119                 return -EINVAL;
5120         }
5121
5122         pool_offset = base - pool->base;
5123         /* Lookup in alloc list */
5124         LIST_FOREACH(entry, &pool->alloc_list, next) {
5125                 if (entry->base == pool_offset) {
5126                         valid_entry = entry;
5127                         LIST_REMOVE(entry, next);
5128                         break;
5129                 }
5130         }
5131
5132         /* Not found, return */
5133         if (valid_entry == NULL) {
5134                 PMD_DRV_LOG(ERR, "Failed to find entry");
5135                 return -EINVAL;
5136         }
5137
5138         /**
5139          * Found it; move it to the free list and try to merge.
5140          * To make merging easier, the free list is kept sorted by base.
5141          * Find the adjacent prev and next entries.
5142          */
5143         prev = next = NULL;
5144         LIST_FOREACH(entry, &pool->free_list, next) {
5145                 if (entry->base > valid_entry->base) {
5146                         next = entry;
5147                         break;
5148                 }
5149                 prev = entry;
5150         }
5151
5152         insert = 0;
5153         len = valid_entry->len;
5154         /* Try to merge with the next one */
5155         if (next != NULL) {
5156                 /* Merge with next one */
5157                 if (valid_entry->base + len == next->base) {
5158                         next->base = valid_entry->base;
5159                         next->len += len;
5160                         rte_free(valid_entry);
5161                         valid_entry = next;
5162                         insert = 1;
5163                 }
5164         }
5165
5166         if (prev != NULL) {
5167                 /* Merge with previous one */
5168                 if (prev->base + prev->len == valid_entry->base) {
5169                         prev->len += len;
5170                         /* If it merged with the next one, remove the next node */
5171                         if (insert == 1) {
5172                                 LIST_REMOVE(valid_entry, next);
5173                                 rte_free(valid_entry);
5174                                 valid_entry = NULL;
5175                         } else {
5176                                 rte_free(valid_entry);
5177                                 valid_entry = NULL;
5178                                 insert = 1;
5179                         }
5180                 }
5181         }
5182
5183         /* No adjacent entry found to merge with, insert */
5184         if (insert == 0) {
5185                 if (prev != NULL)
5186                         LIST_INSERT_AFTER(prev, valid_entry, next);
5187                 else if (next != NULL)
5188                         LIST_INSERT_BEFORE(next, valid_entry, next);
5189                 else /* It's empty list, insert to head */
5190                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5191         }
5192
5193         pool->num_free += len;
5194         pool->num_alloc -= len;
5195
5196         return 0;
5197 }
5198
5199 static int
5200 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5201                        uint16_t num)
5202 {
5203         struct pool_entry *entry, *valid_entry;
5204
5205         if (pool == NULL || num == 0) {
5206                 PMD_DRV_LOG(ERR, "Invalid parameter");
5207                 return -EINVAL;
5208         }
5209
5210         if (pool->num_free < num) {
5211                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5212                             num, pool->num_free);
5213                 return -ENOMEM;
5214         }
5215
5216         valid_entry = NULL;
5217         /* Look up in the free list and find the best-fit entry */
5218         LIST_FOREACH(entry, &pool->free_list, next) {
5219                 if (entry->len >= num) {
5220                         /* Exact fit found, use it */
5221                         if (entry->len == num) {
5222                                 valid_entry = entry;
5223                                 break;
5224                         }
5225                         if (valid_entry == NULL || valid_entry->len > entry->len)
5226                                 valid_entry = entry;
5227                 }
5228         }
5229
5230         /* No entry can satisfy the request, return */
5231         if (valid_entry == NULL) {
5232                 PMD_DRV_LOG(ERR, "No valid entry found");
5233                 return -ENOMEM;
5234         }
5235         /**
5236          * The entry has exactly the requested number of queues;
5237          * remove it from the free list.
5238          */
5239         if (valid_entry->len == num) {
5240                 LIST_REMOVE(valid_entry, next);
5241         } else {
5242                 /**
5243                  * The entry has more queues than requested;
5244                  * create a new entry for the alloc list, then advance the
5245                  * base and shrink the length of the entry in the free list.
5246                  */
5247                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5248                 if (entry == NULL) {
5249                         PMD_DRV_LOG(ERR,
5250                                 "Failed to allocate memory for resource pool");
5251                         return -ENOMEM;
5252                 }
5253                 entry->base = valid_entry->base;
5254                 entry->len = num;
5255                 valid_entry->base += num;
5256                 valid_entry->len -= num;
5257                 valid_entry = entry;
5258         }
5259
5260         /* Insert it into alloc list, not sorted */
5261         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5262
5263         pool->num_free -= valid_entry->len;
5264         pool->num_alloc += valid_entry->len;
5265
5266         return valid_entry->base + pool->base;
5267 }
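
/*
 * Usage sketch (illustrative only, not driver code): allocate a block of
 * four queue pairs from the PF's queue pool and return it afterwards.
 *
 *      int base = i40e_res_pool_alloc(&pf->qp_pool, 4);
 *      if (base >= 0)
 *              i40e_res_pool_free(&pf->qp_pool, base);
 */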
5268
5269 /**
5270  * bitmap_is_subset - Check whether src2 is a subset of src1
5271  **/
5272 static inline int
5273 bitmap_is_subset(uint8_t src1, uint8_t src2)
5274 {
5275         return !((src1 ^ src2) & src2);
5276 }
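
/*
 * Example: bitmap_is_subset(0x0f, 0x05) returns non-zero since 0x05 is a
 * subset of 0x0f, while bitmap_is_subset(0x0f, 0x10) returns 0.
 */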
5277
5278 static enum i40e_status_code
5279 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5280 {
5281         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5282
5283         /* If DCB is not supported, only default TC is supported */
5284         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5285                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5286                 return I40E_NOT_SUPPORTED;
5287         }
5288
5289         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5290                 PMD_DRV_LOG(ERR,
5291                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
5292                         hw->func_caps.enabled_tcmap, enabled_tcmap);
5293                 return I40E_NOT_SUPPORTED;
5294         }
5295         return I40E_SUCCESS;
5296 }
5297
5298 int
5299 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5300                                 struct i40e_vsi_vlan_pvid_info *info)
5301 {
5302         struct i40e_hw *hw;
5303         struct i40e_vsi_context ctxt;
5304         uint8_t vlan_flags = 0;
5305         int ret;
5306
5307         if (vsi == NULL || info == NULL) {
5308                 PMD_DRV_LOG(ERR, "invalid parameters");
5309                 return I40E_ERR_PARAM;
5310         }
5311
5312         if (info->on) {
5313                 vsi->info.pvid = info->config.pvid;
5314                 /**
5315                  * If PVID insertion is enabled, only tagged packets are
5316                  * allowed to be sent out.
5317                  */
5318                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5319                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5320         } else {
5321                 vsi->info.pvid = 0;
5322                 if (info->config.reject.tagged == 0)
5323                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5324
5325                 if (info->config.reject.untagged == 0)
5326                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5327         }
5328         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5329                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5330         vsi->info.port_vlan_flags |= vlan_flags;
5331         vsi->info.valid_sections =
5332                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5333         memset(&ctxt, 0, sizeof(ctxt));
5334         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5335         ctxt.seid = vsi->seid;
5336
5337         hw = I40E_VSI_TO_HW(vsi);
5338         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5339         if (ret != I40E_SUCCESS)
5340                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5341
5342         return ret;
5343 }
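
/*
 * Usage sketch (hypothetical caller): enable insertion of PVID 100 so
 * that only tagged packets are sent out on the VSI.
 *
 *      struct i40e_vsi_vlan_pvid_info info = {
 *              .on = 1,
 *              .config.pvid = 100,
 *      };
 *      if (i40e_vsi_vlan_pvid_set(vsi, &info) != I40E_SUCCESS)
 *              PMD_DRV_LOG(ERR, "Failed to set PVID");
 */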
5344
5345 static int
5346 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5347 {
5348         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5349         int i, ret;
5350         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5351
5352         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5353         if (ret != I40E_SUCCESS)
5354                 return ret;
5355
5356         if (!vsi->seid) {
5357                 PMD_DRV_LOG(ERR, "seid not valid");
5358                 return -EINVAL;
5359         }
5360
5361         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5362         tc_bw_data.tc_valid_bits = enabled_tcmap;
5363         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5364                 tc_bw_data.tc_bw_credits[i] =
5365                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5366
5367         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5368         if (ret != I40E_SUCCESS) {
5369                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5370                 return ret;
5371         }
5372
5373         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5374                                         sizeof(vsi->info.qs_handle));
5375         return I40E_SUCCESS;
5376 }
5377
5378 static enum i40e_status_code
5379 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5380                                  struct i40e_aqc_vsi_properties_data *info,
5381                                  uint8_t enabled_tcmap)
5382 {
5383         enum i40e_status_code ret;
5384         int i, total_tc = 0;
5385         uint16_t qpnum_per_tc, bsf, qp_idx;
5386
5387         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5388         if (ret != I40E_SUCCESS)
5389                 return ret;
5390
5391         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5392                 if (enabled_tcmap & (1 << i))
5393                         total_tc++;
5394         if (total_tc == 0)
5395                 total_tc = 1;
5396         vsi->enabled_tc = enabled_tcmap;
5397
5398         /* Number of queues per enabled TC */
5399         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5400         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5401         bsf = rte_bsf32(qpnum_per_tc);
5402
5403         /* Adjust the queue number to actual queues that can be applied */
5404         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5405                 vsi->nb_qps = qpnum_per_tc * total_tc;
5406
5407         /**
5408          * Configure TC and queue mapping parameters. For each enabled
5409          * TC, allocate qpnum_per_tc queues to its traffic; a disabled
5410          * TC is served by the default queue.
5411          */
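        /*
         * Worked example (hypothetical sizes): with nb_qps = 16 and two
         * enabled TCs, qpnum_per_tc = 8 and bsf = 3, so TC0 maps queues
         * [0, 8) and TC1 maps queues [8, 16).
         */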
5412         qp_idx = 0;
5413         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5414                 if (vsi->enabled_tc & (1 << i)) {
5415                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5416                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5417                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5418                         qp_idx += qpnum_per_tc;
5419                 } else
5420                         info->tc_mapping[i] = 0;
5421         }
5422
5423         /* Associate queue number with VSI */
5424         if (vsi->type == I40E_VSI_SRIOV) {
5425                 info->mapping_flags |=
5426                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5427                 for (i = 0; i < vsi->nb_qps; i++)
5428                         info->queue_mapping[i] =
5429                                 rte_cpu_to_le_16(vsi->base_queue + i);
5430         } else {
5431                 info->mapping_flags |=
5432                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5433                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5434         }
5435         info->valid_sections |=
5436                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5437
5438         return I40E_SUCCESS;
5439 }
5440
5441 static int
5442 i40e_veb_release(struct i40e_veb *veb)
5443 {
5444         struct i40e_vsi *vsi;
5445         struct i40e_hw *hw;
5446
5447         if (veb == NULL)
5448                 return -EINVAL;
5449
5450         if (!TAILQ_EMPTY(&veb->head)) {
5451                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5452                 return -EACCES;
5453         }
5454         /* associate_vsi field is NULL for floating VEB */
5455         if (veb->associate_vsi != NULL) {
5456                 vsi = veb->associate_vsi;
5457                 hw = I40E_VSI_TO_HW(vsi);
5458
5459                 vsi->uplink_seid = veb->uplink_seid;
5460                 vsi->veb = NULL;
5461         } else {
5462                 veb->associate_pf->main_vsi->floating_veb = NULL;
5463                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5464         }
5465
5466         i40e_aq_delete_element(hw, veb->seid, NULL);
5467         rte_free(veb);
5468         return I40E_SUCCESS;
5469 }
5470
5471 /* Setup a veb */
5472 static struct i40e_veb *
5473 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5474 {
5475         struct i40e_veb *veb;
5476         int ret;
5477         struct i40e_hw *hw;
5478
5479         if (pf == NULL) {
5480                 PMD_DRV_LOG(ERR,
5481                             "VEB setup failed, associated PF shouldn't be NULL");
5482                 return NULL;
5483         }
5484         hw = I40E_PF_TO_HW(pf);
5485
5486         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5487         if (!veb) {
5488                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5489                 goto fail;
5490         }
5491
5492         veb->associate_vsi = vsi;
5493         veb->associate_pf = pf;
5494         TAILQ_INIT(&veb->head);
5495         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5496
5497         /* create floating veb if vsi is NULL */
5498         if (vsi != NULL) {
5499                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5500                                       I40E_DEFAULT_TCMAP, false,
5501                                       &veb->seid, false, NULL);
5502         } else {
5503                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5504                                       true, &veb->seid, false, NULL);
5505         }
5506
5507         if (ret != I40E_SUCCESS) {
5508                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5509                             hw->aq.asq_last_status);
5510                 goto fail;
5511         }
5512         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5513
5514         /* get statistics index */
5515         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5516                                 &veb->stats_idx, NULL, NULL, NULL);
5517         if (ret != I40E_SUCCESS) {
5518                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5519                             hw->aq.asq_last_status);
5520                 goto fail;
5521         }
5522         /* Get VEB bandwidth, to be implemented */
5523         /* The associated VSI now binds to the VEB; set its uplink to this VEB */
5524         if (vsi)
5525                 vsi->uplink_seid = veb->seid;
5526
5527         return veb;
5528 fail:
5529         rte_free(veb);
5530         return NULL;
5531 }
5532
5533 int
5534 i40e_vsi_release(struct i40e_vsi *vsi)
5535 {
5536         struct i40e_pf *pf;
5537         struct i40e_hw *hw;
5538         struct i40e_vsi_list *vsi_list;
5539         void *temp;
5540         int ret;
5541         struct i40e_mac_filter *f;
5542         uint16_t user_param;
5543
5544         if (!vsi)
5545                 return I40E_SUCCESS;
5546
5547         if (!vsi->adapter)
5548                 return -EFAULT;
5549
5550         user_param = vsi->user_param;
5551
5552         pf = I40E_VSI_TO_PF(vsi);
5553         hw = I40E_VSI_TO_HW(vsi);
5554
5555         /* VSI has child to attach, release child first */
5556         if (vsi->veb) {
5557                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5558                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5559                                 return -1;
5560                 }
5561                 i40e_veb_release(vsi->veb);
5562         }
5563
5564         if (vsi->floating_veb) {
5565                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5566                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5567                                 return -1;
5568                 }
5569         }
5570
5571         /* Remove all macvlan filters of the VSI */
5572         i40e_vsi_remove_all_macvlan_filter(vsi);
5573         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5574                 rte_free(f);
5575
5576         if (vsi->type != I40E_VSI_MAIN &&
5577             ((vsi->type != I40E_VSI_SRIOV) ||
5578             !pf->floating_veb_list[user_param])) {
5579                 /* Remove vsi from parent's sibling list */
5580                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5581                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
5582                         return I40E_ERR_PARAM;
5583                 }
5584                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5585                                 &vsi->sib_vsi_list, list);
5586
5587                 /* Remove the switch element of the VSI */
5588                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5589                 if (ret != I40E_SUCCESS)
5590                         PMD_DRV_LOG(ERR, "Failed to delete element");
5591         }
5592
5593         if ((vsi->type == I40E_VSI_SRIOV) &&
5594             pf->floating_veb_list[user_param]) {
5595                 /* Remove vsi from parent's sibling list */
5596                 if (vsi->parent_vsi == NULL ||
5597                     vsi->parent_vsi->floating_veb == NULL) {
5598                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
5599                         return I40E_ERR_PARAM;
5600                 }
5601                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5602                              &vsi->sib_vsi_list, list);
5603
5604                 /* Remove the switch element of the VSI */
5605                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5606                 if (ret != I40E_SUCCESS)
5607                         PMD_DRV_LOG(ERR, "Failed to delete element");
5608         }
5609
5610         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5611
5612         if (vsi->type != I40E_VSI_SRIOV)
5613                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5614         rte_free(vsi);
5615
5616         return I40E_SUCCESS;
5617 }
5618
5619 static int
5620 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5621 {
5622         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5623         struct i40e_aqc_remove_macvlan_element_data def_filter;
5624         struct i40e_mac_filter_info filter;
5625         int ret;
5626
5627         if (vsi->type != I40E_VSI_MAIN)
5628                 return I40E_ERR_CONFIG;
5629         memset(&def_filter, 0, sizeof(def_filter));
5630         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5631                                         ETH_ADDR_LEN);
5632         def_filter.vlan_tag = 0;
5633         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5634                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5635         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5636         if (ret != I40E_SUCCESS) {
5637                 struct i40e_mac_filter *f;
5638                 struct rte_ether_addr *mac;
5639
5640                 PMD_DRV_LOG(DEBUG,
5641                             "Cannot remove the default macvlan filter");
5642                 /* The permanent MAC needs to be added to the MAC list */
5643                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5644                 if (f == NULL) {
5645                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5646                         return I40E_ERR_NO_MEMORY;
5647                 }
5648                 mac = &f->mac_info.mac_addr;
5649                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5650                                 ETH_ADDR_LEN);
5651                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5652                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5653                 vsi->mac_num++;
5654
5655                 return ret;
5656         }
5657         rte_memcpy(&filter.mac_addr,
5658                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5659         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5660         return i40e_vsi_add_mac(vsi, &filter);
5661 }
5662
5663 /*
5664  * i40e_vsi_get_bw_config - Query VSI BW Information
5665  * @vsi: the VSI to be queried
5666  *
5667  * Returns 0 on success, negative value on failure
5668  */
5669 static enum i40e_status_code
5670 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5671 {
5672         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5673         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5674         struct i40e_hw *hw = &vsi->adapter->hw;
5675         i40e_status ret;
5676         int i;
5677         uint32_t bw_max;
5678
5679         memset(&bw_config, 0, sizeof(bw_config));
5680         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5681         if (ret != I40E_SUCCESS) {
5682                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5683                             hw->aq.asq_last_status);
5684                 return ret;
5685         }
5686
5687         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5688         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5689                                         &ets_sla_config, NULL);
5690         if (ret != I40E_SUCCESS) {
5691                 PMD_DRV_LOG(ERR,
5692                         "VSI failed to get TC bandwidth configuration %u",
5693                         hw->aq.asq_last_status);
5694                 return ret;
5695         }
5696
5697         /* store and print out BW info */
5698         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5699         vsi->bw_info.bw_max = bw_config.max_bw;
5700         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5701         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5702         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5703                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5704                      I40E_16_BIT_WIDTH);
5705         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5706                 vsi->bw_info.bw_ets_share_credits[i] =
5707                                 ets_sla_config.share_credits[i];
5708                 vsi->bw_info.bw_ets_credits[i] =
5709                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5710                 /* 4 bits per TC, 4th bit is reserved */
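                /* e.g. bw_max = 0x321 gives TC0/TC1/TC2 max credits 1/2/3 */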
5711                 vsi->bw_info.bw_ets_max[i] =
5712                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5713                                   RTE_LEN2MASK(3, uint8_t));
5714                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5715                             vsi->bw_info.bw_ets_share_credits[i]);
5716                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5717                             vsi->bw_info.bw_ets_credits[i]);
5718                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5719                             vsi->bw_info.bw_ets_max[i]);
5720         }
5721
5722         return I40E_SUCCESS;
5723 }
5724
5725 /* i40e_enable_pf_lb
5726  * @pf: pointer to the pf structure
5727  *
5728  * allow loopback on pf
5729  */
5730 static inline void
5731 i40e_enable_pf_lb(struct i40e_pf *pf)
5732 {
5733         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5734         struct i40e_vsi_context ctxt;
5735         int ret;
5736
5737         /* Use the FW API if FW >= v5.0 */
5738         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5739                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5740                 return;
5741         }
5742
5743         memset(&ctxt, 0, sizeof(ctxt));
5744         ctxt.seid = pf->main_vsi_seid;
5745         ctxt.pf_num = hw->pf_id;
5746         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5747         if (ret) {
5748                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5749                             ret, hw->aq.asq_last_status);
5750                 return;
5751         }
5752         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5753         ctxt.info.valid_sections =
5754                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5755         ctxt.info.switch_id |=
5756                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5757
5758         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5759         if (ret)
5760                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5761                             hw->aq.asq_last_status);
5762 }
5763
5764 /* Setup a VSI */
5765 struct i40e_vsi *
5766 i40e_vsi_setup(struct i40e_pf *pf,
5767                enum i40e_vsi_type type,
5768                struct i40e_vsi *uplink_vsi,
5769                uint16_t user_param)
5770 {
5771         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5772         struct i40e_vsi *vsi;
5773         struct i40e_mac_filter_info filter;
5774         int ret;
5775         struct i40e_vsi_context ctxt;
5776         struct rte_ether_addr broadcast =
5777                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5778
5779         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5780             uplink_vsi == NULL) {
5781                 PMD_DRV_LOG(ERR,
5782                         "VSI setup failed, VSI link shouldn't be NULL");
5783                 return NULL;
5784         }
5785
5786         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5787                 PMD_DRV_LOG(ERR,
5788                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5789                 return NULL;
5790         }
5791
5792         /* Two situations:
5793          * 1. type is not MAIN and the uplink VSI is not NULL:
5794          *    if the uplink VSI has not set up a VEB, create one under its veb field.
5795          * 2. type is SRIOV and the uplink is NULL:
5796          *    if the floating VEB is NULL, create one under the floating_veb field.
5797          */
5798
5799         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5800             uplink_vsi->veb == NULL) {
5801                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5802
5803                 if (uplink_vsi->veb == NULL) {
5804                         PMD_DRV_LOG(ERR, "VEB setup failed");
5805                         return NULL;
5806                 }
5807                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5808                 i40e_enable_pf_lb(pf);
5809         }
5810
5811         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5812             pf->main_vsi->floating_veb == NULL) {
5813                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5814
5815                 if (pf->main_vsi->floating_veb == NULL) {
5816                         PMD_DRV_LOG(ERR, "VEB setup failed");
5817                         return NULL;
5818                 }
5819         }
5820
5821         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5822         if (!vsi) {
5823                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5824                 return NULL;
5825         }
5826         TAILQ_INIT(&vsi->mac_list);
5827         vsi->type = type;
5828         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5829         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5830         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5831         vsi->user_param = user_param;
5832         vsi->vlan_anti_spoof_on = 0;
5833         vsi->vlan_filter_on = 0;
5834         /* Allocate queues */
5835         switch (vsi->type) {
5836         case I40E_VSI_MAIN  :
5837                 vsi->nb_qps = pf->lan_nb_qps;
5838                 break;
5839         case I40E_VSI_SRIOV :
5840                 vsi->nb_qps = pf->vf_nb_qps;
5841                 break;
5842         case I40E_VSI_VMDQ2:
5843                 vsi->nb_qps = pf->vmdq_nb_qps;
5844                 break;
5845         case I40E_VSI_FDIR:
5846                 vsi->nb_qps = pf->fdir_nb_qps;
5847                 break;
5848         default:
5849                 goto fail_mem;
5850         }
5851         /*
5852          * The filter status descriptor is reported in RX queue 0,
5853          * while the TX queue for FDIR filter programming has no such
5854          * constraint and may be any queue.
5855          * To simplify things, the FDIR VSI uses queue pair 0.
5856          * To make sure queue pair 0 is available, queue allocation
5857          * must be done before this function is called.
5858          */
5859         if (type != I40E_VSI_FDIR) {
5860                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5861                 if (ret < 0) {
5862                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5863                                         vsi->seid, ret);
5864                         goto fail_mem;
5865                 }
5866                 vsi->base_queue = ret;
5867         } else
5868                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5869
5870         /* VF has MSIX interrupt in VF range, don't allocate here */
5871         if (type == I40E_VSI_MAIN) {
5872                 if (pf->support_multi_driver) {
5873                         /* When multi-driver support is enabled, use INT0
5874                          * instead of allocating from the MSI-X pool, which
5875                          * starts from INT1; it is safe to set msix_intr to
5876                          * 0 and nb_msix to 1 without i40e_res_pool_alloc.
5877                          */
5878                         vsi->msix_intr = 0;
5879                         vsi->nb_msix = 1;
5880                 } else {
5881                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5882                                                   RTE_MIN(vsi->nb_qps,
5883                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5884                         if (ret < 0) {
5885                                 PMD_DRV_LOG(ERR,
5886                                             "VSI MAIN %d get heap failed %d",
5887                                             vsi->seid, ret);
5888                                 goto fail_queue_alloc;
5889                         }
5890                         vsi->msix_intr = ret;
5891                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5892                                                RTE_MAX_RXTX_INTR_VEC_ID);
5893                 }
5894         } else if (type != I40E_VSI_SRIOV) {
5895                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5896                 if (ret < 0) {
5897                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5898                         if (type != I40E_VSI_FDIR)
5899                                 goto fail_queue_alloc;
5900                         vsi->msix_intr = 0;
5901                         vsi->nb_msix = 0;
5902                 } else {
5903                         vsi->msix_intr = ret;
5904                         vsi->nb_msix = 1;
5905                 }
5906         } else {
5907                 vsi->msix_intr = 0;
5908                 vsi->nb_msix = 0;
5909         }
5910
5911         /* Add VSI */
5912         if (type == I40E_VSI_MAIN) {
5913                 /* For main VSI, no need to add since it's the default one */
5914                 vsi->uplink_seid = pf->mac_seid;
5915                 vsi->seid = pf->main_vsi_seid;
5916                 /* Bind queues with specific MSIX interrupt */
5917                 /**
5918                  * At least two interrupts are needed: one for the misc
5919                  * cause, enabled from the OS side, and another for the
5920                  * queues, bound from the device side only.
5921                  */
5922
5923                 /* Get default VSI parameters from hardware */
5924                 memset(&ctxt, 0, sizeof(ctxt));
5925                 ctxt.seid = vsi->seid;
5926                 ctxt.pf_num = hw->pf_id;
5927                 ctxt.uplink_seid = vsi->uplink_seid;
5928                 ctxt.vf_num = 0;
5929                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5930                 if (ret != I40E_SUCCESS) {
5931                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5932                         goto fail_msix_alloc;
5933                 }
5934                 rte_memcpy(&vsi->info, &ctxt.info,
5935                         sizeof(struct i40e_aqc_vsi_properties_data));
5936                 vsi->vsi_id = ctxt.vsi_number;
5937                 vsi->info.valid_sections = 0;
5938
5939                 /* Configure tc, enabled TC0 only */
5940                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5941                         I40E_SUCCESS) {
5942                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5943                         goto fail_msix_alloc;
5944                 }
5945
5946                 /* TC, queue mapping */
5947                 memset(&ctxt, 0, sizeof(ctxt));
5948                 vsi->info.valid_sections |=
5949                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5950                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5951                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5952                 rte_memcpy(&ctxt.info, &vsi->info,
5953                         sizeof(struct i40e_aqc_vsi_properties_data));
5954                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5955                                                 I40E_DEFAULT_TCMAP);
5956                 if (ret != I40E_SUCCESS) {
5957                         PMD_DRV_LOG(ERR,
5958                                 "Failed to configure TC queue mapping");
5959                         goto fail_msix_alloc;
5960                 }
5961                 ctxt.seid = vsi->seid;
5962                 ctxt.pf_num = hw->pf_id;
5963                 ctxt.uplink_seid = vsi->uplink_seid;
5964                 ctxt.vf_num = 0;
5965
5966                 /* Update VSI parameters */
5967                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5968                 if (ret != I40E_SUCCESS) {
5969                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5970                         goto fail_msix_alloc;
5971                 }
5972
5973                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5974                                                 sizeof(vsi->info.tc_mapping));
5975                 rte_memcpy(&vsi->info.queue_mapping,
5976                                 &ctxt.info.queue_mapping,
5977                         sizeof(vsi->info.queue_mapping));
5978                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5979                 vsi->info.valid_sections = 0;
5980
5981                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5982                                 ETH_ADDR_LEN);
5983
5984                 /**
5985                  * Updating the default filter settings is necessary to
5986                  * prevent reception of tagged packets.
5987                  * Some old firmware configurations load a default macvlan
5988                  * filter which accepts both tagged and untagged packets.
5989                  * The update replaces it with a normal filter if needed.
5990                  * For NVM 4.2.2 or later, the update is no longer needed.
5991                  * Firmware with correct configurations loads the default
5992                  * macvlan filter, which is expected and cannot be removed.
5993                  */
5994                 i40e_update_default_filter_setting(vsi);
5995                 i40e_config_qinq(hw, vsi);
5996         } else if (type == I40E_VSI_SRIOV) {
5997                 memset(&ctxt, 0, sizeof(ctxt));
5998                 /**
5999                  * For other VSI types, the uplink_seid equals the uplink
6000                  * VSI's uplink_seid since they share the same VEB.
6001                  */
6002                 if (uplink_vsi == NULL)
6003                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
6004                 else
6005                         vsi->uplink_seid = uplink_vsi->uplink_seid;
6006                 ctxt.pf_num = hw->pf_id;
6007                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
6008                 ctxt.uplink_seid = vsi->uplink_seid;
6009                 ctxt.connection_type = 0x1;
6010                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6011
6012                 /* Use the VEB configuration if FW >= v5.0 */
6013                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
6014                         /* Configure switch ID */
6015                         ctxt.info.valid_sections |=
6016                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6017                         ctxt.info.switch_id =
6018                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6019                 }
6020
6021                 /* Configure port/vlan */
6022                 ctxt.info.valid_sections |=
6023                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6024                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6025                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6026                                                 hw->func_caps.enabled_tcmap);
6027                 if (ret != I40E_SUCCESS) {
6028                         PMD_DRV_LOG(ERR,
6029                                 "Failed to configure TC queue mapping");
6030                         goto fail_msix_alloc;
6031                 }
6032
6033                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
6034                 ctxt.info.valid_sections |=
6035                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6036                 /**
6037                  * Since the VSI is not created yet, only configure the
6038                  * parameters here; the VSI is added below.
6039                  */
6040
6041                 i40e_config_qinq(hw, vsi);
6042         } else if (type == I40E_VSI_VMDQ2) {
6043                 memset(&ctxt, 0, sizeof(ctxt));
6044                 /*
6045                  * For other VSI types, the uplink_seid equals the uplink
6046                  * VSI's uplink_seid since they share the same VEB.
6047                  */
6048                 vsi->uplink_seid = uplink_vsi->uplink_seid;
6049                 ctxt.pf_num = hw->pf_id;
6050                 ctxt.vf_num = 0;
6051                 ctxt.uplink_seid = vsi->uplink_seid;
6052                 ctxt.connection_type = 0x1;
6053                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6054
6055                 ctxt.info.valid_sections |=
6056                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6057                 /* user_param carries a flag to enable loopback */
6058                 if (user_param) {
6059                         ctxt.info.switch_id =
6060                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6061                         ctxt.info.switch_id |=
6062                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6063                 }
6064
6065                 /* Configure port/vlan */
6066                 ctxt.info.valid_sections |=
6067                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6068                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6069                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6070                                                 I40E_DEFAULT_TCMAP);
6071                 if (ret != I40E_SUCCESS) {
6072                         PMD_DRV_LOG(ERR,
6073                                 "Failed to configure TC queue mapping");
6074                         goto fail_msix_alloc;
6075                 }
6076                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6077                 ctxt.info.valid_sections |=
6078                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6079         } else if (type == I40E_VSI_FDIR) {
6080                 memset(&ctxt, 0, sizeof(ctxt));
6081                 vsi->uplink_seid = uplink_vsi->uplink_seid;
6082                 ctxt.pf_num = hw->pf_id;
6083                 ctxt.vf_num = 0;
6084                 ctxt.uplink_seid = vsi->uplink_seid;
6085                 ctxt.connection_type = 0x1;     /* regular data port */
6086                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6087                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6088                                                 I40E_DEFAULT_TCMAP);
6089                 if (ret != I40E_SUCCESS) {
6090                         PMD_DRV_LOG(ERR,
6091                                 "Failed to configure TC queue mapping.");
6092                         goto fail_msix_alloc;
6093                 }
6094                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6095                 ctxt.info.valid_sections |=
6096                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6097         } else {
6098                 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
6099                 goto fail_msix_alloc;
6100         }
6101
6102         if (vsi->type != I40E_VSI_MAIN) {
6103                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6104                 if (ret != I40E_SUCCESS) {
6105                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
6106                                     hw->aq.asq_last_status);
6107                         goto fail_msix_alloc;
6108                 }
6109                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6110                 vsi->info.valid_sections = 0;
6111                 vsi->seid = ctxt.seid;
6112                 vsi->vsi_id = ctxt.vsi_number;
6113                 vsi->sib_vsi_list.vsi = vsi;
6114                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
6115                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6116                                           &vsi->sib_vsi_list, list);
6117                 } else {
6118                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6119                                           &vsi->sib_vsi_list, list);
6120                 }
6121         }
6122
6123         /* MAC/VLAN configuration */
6124         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6125         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
6126
6127         ret = i40e_vsi_add_mac(vsi, &filter);
6128         if (ret != I40E_SUCCESS) {
6129                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6130                 goto fail_msix_alloc;
6131         }
6132
6133         /* Get VSI BW information */
6134         i40e_vsi_get_bw_config(vsi);
6135         return vsi;
6136 fail_msix_alloc:
6137         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
6138 fail_queue_alloc:
6139         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6140 fail_mem:
6141         rte_free(vsi);
6142         return NULL;
6143 }
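
/*
 * Usage sketch (hypothetical): create a VMDQ2 VSI under the main VSI,
 * with user_param = 1 so that local loopback is enabled on it.
 *
 *      struct i40e_vsi *vmdq_vsi =
 *              i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 1);
 *      if (vmdq_vsi == NULL)
 *              PMD_DRV_LOG(ERR, "Failed to set up VMDQ VSI");
 */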
6144
6145 /* Configure vlan filter on or off */
6146 int
6147 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6148 {
6149         int i, num;
6150         struct i40e_mac_filter *f;
6151         void *temp;
6152         struct i40e_mac_filter_info *mac_filter;
6153         enum rte_mac_filter_type desired_filter;
6154         int ret = I40E_SUCCESS;
6155
6156         if (on) {
6157                 /* Filter to match MAC and VLAN */
6158                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
6159         } else {
6160                 /* Filter to match only MAC */
6161                 desired_filter = RTE_MAC_PERFECT_MATCH;
6162         }
6163
6164         num = vsi->mac_num;
6165
6166         mac_filter = rte_zmalloc("mac_filter_info_data",
6167                                  num * sizeof(*mac_filter), 0);
6168         if (mac_filter == NULL) {
6169                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6170                 return I40E_ERR_NO_MEMORY;
6171         }
6172
6173         i = 0;
6174
6175         /* Remove all existing MAC filters */
6176         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6177                 mac_filter[i] = f->mac_info;
6178                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6179                 if (ret) {
6180                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6181                                     on ? "enable" : "disable");
6182                         goto DONE;
6183                 }
6184                 i++;
6185         }
6186
6187         /* Re-add all MAC filters with the desired type */
6188         for (i = 0; i < num; i++) {
6189                 mac_filter[i].filter_type = desired_filter;
6190                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6191                 if (ret) {
6192                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6193                                     on ? "enable" : "disable");
6194                         goto DONE;
6195                 }
6196         }
6197
6198 DONE:
6199         rte_free(mac_filter);
6200         return ret;
6201 }
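
/*
 * Design note: rather than updating filter types in place, the function
 * above snapshots every MAC filter, deletes it, and re-adds it with the
 * desired filter type.
 */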
6202
6203 /* Configure vlan stripping on or off */
6204 int
6205 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6206 {
6207         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6208         struct i40e_vsi_context ctxt;
6209         uint8_t vlan_flags;
6210         int ret = I40E_SUCCESS;
6211
6212         /* Check if it is already on or off */
6213         if (vsi->info.valid_sections &
6214                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6215                 if (on) {
6216                         if ((vsi->info.port_vlan_flags &
6217                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6218                                 return 0; /* already on */
6219                 } else {
6220                         if ((vsi->info.port_vlan_flags &
6221                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6222                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6223                                 return 0; /* already off */
6224                 }
6225         }
6226
6227         if (on)
6228                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6229         else
6230                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6231         vsi->info.valid_sections =
6232                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6233         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6234         vsi->info.port_vlan_flags |= vlan_flags;
6235         ctxt.seid = vsi->seid;
6236         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6237         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6238         if (ret)
6239                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6240                             on ? "enable" : "disable");
6241
6242         return ret;
6243 }
6244
6245 static int
6246 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6247 {
6248         struct rte_eth_dev_data *data = dev->data;
6249         int ret;
6250         int mask = 0;
6251
6252         /* Apply vlan offload setting */
6253         mask = ETH_VLAN_STRIP_MASK |
6254                ETH_QINQ_STRIP_MASK |
6255                ETH_VLAN_FILTER_MASK |
6256                ETH_VLAN_EXTEND_MASK;
6257         ret = i40e_vlan_offload_set(dev, mask);
6258         if (ret) {
6259                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6260                 return ret;
6261         }
6262
6263         /* Apply pvid setting */
6264         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6265                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6266         if (ret)
6267                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6268
6269         return ret;
6270 }
6271
6272 static int
6273 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6274 {
6275         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6276
6277         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6278 }
6279
6280 static int
6281 i40e_update_flow_control(struct i40e_hw *hw)
6282 {
6283 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6284         struct i40e_link_status link_status;
6285         uint32_t rxfc = 0, txfc = 0, reg;
6286         uint8_t an_info;
6287         int ret;
6288
6289         memset(&link_status, 0, sizeof(link_status));
6290         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6291         if (ret != I40E_SUCCESS) {
6292                 PMD_DRV_LOG(ERR, "Failed to get link status information");
6293                 goto write_reg; /* Disable flow control */
6294         }
6295
6296         an_info = hw->phy.link_info.an_info;
6297         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6298                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6299                 ret = I40E_ERR_NOT_READY;
6300                 goto write_reg; /* Disable flow control */
6301         }
6302         /**
6303          * If link auto negotiation is enabled, flow control needs to
6304          * be configured according to it.
6305          */
6306         switch (an_info & I40E_LINK_PAUSE_RXTX) {
6307         case I40E_LINK_PAUSE_RXTX:
6308                 rxfc = 1;
6309                 txfc = 1;
6310                 hw->fc.current_mode = I40E_FC_FULL;
6311                 break;
6312         case I40E_AQ_LINK_PAUSE_RX:
6313                 rxfc = 1;
6314                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6315                 break;
6316         case I40E_AQ_LINK_PAUSE_TX:
6317                 txfc = 1;
6318                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6319                 break;
6320         default:
6321                 hw->fc.current_mode = I40E_FC_NONE;
6322                 break;
6323         }
6324
6325 write_reg:
6326         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6327                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6328         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6329         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6330         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6331         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6332
6333         return ret;
6334 }
6335
6336 /* PF setup */
6337 static int
6338 i40e_pf_setup(struct i40e_pf *pf)
6339 {
6340         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6341         struct i40e_filter_control_settings settings;
6342         struct i40e_vsi *vsi;
6343         int ret;
6344
6345         /* Clear all stats counters */
6346         pf->offset_loaded = FALSE;
6347         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6348         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6349         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6350         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6351
6352         ret = i40e_pf_get_switch_config(pf);
6353         if (ret != I40E_SUCCESS) {
6354                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6355                 return ret;
6356         }
6357
6358         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6359         if (ret)
6360                 PMD_INIT_LOG(WARNING,
6361                         "failed to allocate switch domain, err %d", ret);
6362
6363         if (pf->flags & I40E_FLAG_FDIR) {
6364                 /* Allocate queues first so that FDIR can use queue pair 0 */
6365                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6366                 if (ret != I40E_FDIR_QUEUE_ID) {
6367                         PMD_DRV_LOG(ERR,
6368                                 "queue allocation fails for FDIR: ret =%d",
6369                                 ret);
6370                         pf->flags &= ~I40E_FLAG_FDIR;
6371                 }
6372         }
6373         /* Main VSI setup */
6374         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6375         if (!vsi) {
6376                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6377                 return I40E_ERR_NOT_READY;
6378         }
6379         pf->main_vsi = vsi;
6380
6381         /* Configure filter control */
6382         memset(&settings, 0, sizeof(settings));
6383         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6384                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6385         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6386                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6387         else {
6388                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6389                         hw->func_caps.rss_table_size);
6390                 return I40E_ERR_PARAM;
6391         }
6392         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6393                 hw->func_caps.rss_table_size);
6394         pf->hash_lut_size = hw->func_caps.rss_table_size;
6395
6396         /* Enable ethtype and macvlan filters */
6397         settings.enable_ethtype = TRUE;
6398         settings.enable_macvlan = TRUE;
6399         ret = i40e_set_filter_control(hw, &settings);
6400         if (ret)
6401                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6402                                                                 ret);
6403
6404         /* Update flow control according to the auto negotiation */
6405         i40e_update_flow_control(hw);
6406
6407         return I40E_SUCCESS;
6408 }
6409
6410 int
6411 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6412 {
6413         uint32_t reg;
6414         uint16_t j;
6415
6416         /**
6417          * Set or clear TX Queue Disable flags,
6418          * which is required by hardware.
6419          */
6420         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6421         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6422
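        /*
         * Handshake summary: QENA_REQ is the driver's request bit and
         * QENA_STAT is the hardware's acknowledgement; a queue's state
         * has settled once the two bits agree.
         */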
6423         /* Wait until the request is finished */
6424         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6425                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6426                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6427                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6428                       ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 0x1))) {
6430                         break;
6431                 }
6432         }
6433         if (on) {
6434                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6435                         return I40E_SUCCESS; /* already on, skip next steps */
6436
6437                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6438                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6439         } else {
6440                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6441                         return I40E_SUCCESS; /* already off, skip next steps */
6442                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6443         }
6444         /* Write the register */
6445         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6446         /* Check the result */
6447         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6448                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6449                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6450                 if (on) {
6451                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6452                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6453                                 break;
6454                 } else {
6455                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6456                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6457                                 break;
6458                 }
6459         }
6460         /* Check for timeout */
6461         if (j >= I40E_CHK_Q_ENA_COUNT) {
6462                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6463                             (on ? "enable" : "disable"), q_idx);
6464                 return I40E_ERR_TIMEOUT;
6465         }
6466
6467         return I40E_SUCCESS;
6468 }
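
/*
 * Usage sketch (hypothetical queue index): enable TX queue 0 and treat
 * a timeout as fatal.
 *
 *      if (i40e_switch_tx_queue(hw, 0, TRUE) != I40E_SUCCESS)
 *              return -EIO;
 */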
6469
6470 int
6471 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6472 {
6473         uint32_t reg;
6474         uint16_t j;
6475
6476         /* Wait until the request is finished */
6477         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6478                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6479                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6480                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6481                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6482                         break;
6483         }
6484
6485         if (on) {
6486                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6487                         return I40E_SUCCESS; /* Already on, skip next steps */
6488                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6489         } else {
6490                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6491                         return I40E_SUCCESS; /* Already off, skip next steps */
6492                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6493         }
6494
6495         /* Write the register */
6496         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6497         /* Check the result */
6498         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6499                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6500                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6501                 if (on) {
6502                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6503                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6504                                 break;
6505                 } else {
6506                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6507                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6508                                 break;
6509                 }
6510         }
6511
6512         /* Check whether it timed out */
6513         if (j >= I40E_CHK_Q_ENA_COUNT) {
6514                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6515                             (on ? "enable" : "disable"), q_idx);
6516                 return I40E_ERR_TIMEOUT;
6517         }
6518
6519         return I40E_SUCCESS;
6520 }
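/*
 * Editor's note: a minimal usage sketch (hypothetical helper, not part of
 * the upstream driver) showing how the two QENA_REQ/QENA_STAT handshakes
 * above are typically consumed: flip both rings of one queue pair and
 * treat a timeout on either as fatal.
 */
static inline int
i40e_switch_queue_pair_sketch(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
	int ret;

	ret = i40e_switch_tx_queue(hw, q_idx, on);
	if (ret != I40E_SUCCESS)
		return ret;

	return i40e_switch_rx_queue(hw, q_idx, on);
}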
6521
6522 /* Initialize VSI for TX */
6523 static int
6524 i40e_dev_tx_init(struct i40e_pf *pf)
6525 {
6526         struct rte_eth_dev_data *data = pf->dev_data;
6527         uint16_t i;
6528         int ret = I40E_SUCCESS;
6529         struct i40e_tx_queue *txq;
6530
6531         for (i = 0; i < data->nb_tx_queues; i++) {
6532                 txq = data->tx_queues[i];
6533                 if (!txq || !txq->q_set)
6534                         continue;
6535                 ret = i40e_tx_queue_init(txq);
6536                 if (ret != I40E_SUCCESS)
6537                         break;
6538         }
6539         if (ret == I40E_SUCCESS)
6540                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6541                                      ->eth_dev);
6542
6543         return ret;
6544 }
6545
6546 /* Initialize VSI for RX */
6547 static int
6548 i40e_dev_rx_init(struct i40e_pf *pf)
6549 {
6550         struct rte_eth_dev_data *data = pf->dev_data;
6551         int ret = I40E_SUCCESS;
6552         uint16_t i;
6553         struct i40e_rx_queue *rxq;
6554
6555         i40e_pf_config_rss(pf);
6556         for (i = 0; i < data->nb_rx_queues; i++) {
6557                 rxq = data->rx_queues[i];
6558                 if (!rxq || !rxq->q_set)
6559                         continue;
6560
6561                 ret = i40e_rx_queue_init(rxq);
6562                 if (ret != I40E_SUCCESS) {
6563                         PMD_DRV_LOG(ERR,
6564                                 "Failed to do RX queue initialization");
6565                         break;
6566                 }
6567         }
6568         if (ret == I40E_SUCCESS)
6569                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6570                                      ->eth_dev);
6571
6572         return ret;
6573 }
6574
6575 static int
6576 i40e_dev_rxtx_init(struct i40e_pf *pf)
6577 {
6578         int err;
6579
6580         err = i40e_dev_tx_init(pf);
6581         if (err) {
6582                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6583                 return err;
6584         }
6585         err = i40e_dev_rx_init(pf);
6586         if (err) {
6587                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6588                 return err;
6589         }
6590
6591         return err;
6592 }
6593
6594 static int
6595 i40e_vmdq_setup(struct rte_eth_dev *dev)
6596 {
6597         struct rte_eth_conf *conf = &dev->data->dev_conf;
6598         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6599         int i, err, conf_vsis, j, loop;
6600         struct i40e_vsi *vsi;
6601         struct i40e_vmdq_info *vmdq_info;
6602         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6603         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6604
6605         /*
6606          * Disable interrupts to avoid messages from VFs. Furthermore, this
6607          * avoids a race condition in VSI creation/destruction.
6608          */
6609         i40e_pf_disable_irq0(hw);
6610
6611         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6612                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6613                 return -ENOTSUP;
6614         }
6615
6616         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6617         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6618                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max supported: %u",
6619                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6620                         pf->max_nb_vmdq_vsi);
6621                 return -ENOTSUP;
6622         }
6623
6624         if (pf->vmdq != NULL) {
6625                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6626                 return 0;
6627         }
6628
6629         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6630                                 sizeof(*vmdq_info) * conf_vsis, 0);
6631
6632         if (pf->vmdq == NULL) {
6633                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6634                 return -ENOMEM;
6635         }
6636
6637         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6638
6639         /* Create VMDQ VSI */
6640         for (i = 0; i < conf_vsis; i++) {
6641                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6642                                 vmdq_conf->enable_loop_back);
6643                 if (vsi == NULL) {
6644                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6645                         err = -1;
6646                         goto err_vsi_setup;
6647                 }
6648                 vmdq_info = &pf->vmdq[i];
6649                 vmdq_info->pf = pf;
6650                 vmdq_info->vsi = vsi;
6651         }
6652         pf->nb_cfg_vmdq_vsi = conf_vsis;
6653
6654         /* Configure Vlan */
6655         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6656         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6657                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6658                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6659                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6660                                         vmdq_conf->pool_map[i].vlan_id, j);
6661
6662                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6663                                                 vmdq_conf->pool_map[i].vlan_id);
6664                                 if (err) {
6665                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6666                                         err = -1;
6667                                         goto err_vsi_setup;
6668                                 }
6669                         }
6670                 }
6671         }
6672
6673         i40e_pf_enable_irq0(hw);
6674
6675         return 0;
6676
6677 err_vsi_setup:
6678         for (i = 0; i < conf_vsis; i++)
6679                 if (pf->vmdq[i].vsi == NULL)
6680                         break;
6681                 else
6682                         i40e_vsi_release(pf->vmdq[i].vsi);
6683
6684         rte_free(pf->vmdq);
6685         pf->vmdq = NULL;
6686         i40e_pf_enable_irq0(hw);
6687         return err;
6688 }
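/*
 * Editor's note: pool_map semantics as consumed by the nested loop above
 * (illustrative values). A single entry such as
 *   { .vlan_id = 100, .pools = (1ULL << 0) | (1ULL << 2) }
 * adds vlan 100 to VMDQ VSIs 0 and 2, so frames tagged with vlan 100 can
 * be steered to either of those pools.
 */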
6689
6690 static void
6691 i40e_stat_update_32(struct i40e_hw *hw,
6692                    uint32_t reg,
6693                    bool offset_loaded,
6694                    uint64_t *offset,
6695                    uint64_t *stat)
6696 {
6697         uint64_t new_data;
6698
6699         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6700         if (!offset_loaded)
6701                 *offset = new_data;
6702
6703         if (new_data >= *offset)
6704                 *stat = (uint64_t)(new_data - *offset);
6705         else
6706                 *stat = (uint64_t)((new_data +
6707                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6708 }
6709
6710 static void
6711 i40e_stat_update_48(struct i40e_hw *hw,
6712                    uint32_t hireg,
6713                    uint32_t loreg,
6714                    bool offset_loaded,
6715                    uint64_t *offset,
6716                    uint64_t *stat)
6717 {
6718         uint64_t new_data;
6719
6720         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6721         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6722                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6723
6724         if (!offset_loaded)
6725                 *offset = new_data;
6726
6727         if (new_data >= *offset)
6728                 *stat = new_data - *offset;
6729         else
6730                 *stat = (uint64_t)((new_data +
6731                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6732
6733         *stat &= I40E_48_BIT_MASK;
6734 }
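/*
 * Editor's note: a worked example of the 48-bit rollover handling above
 * (illustrative values). If the previous snapshot was *offset =
 * 0xFFFFFFFFFFF0 and the counter has since wrapped to new_data = 0x10,
 * then new_data < *offset, so the delta is computed as
 *   (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20,
 * i.e. 32 events, and the final mask keeps the result within 48 bits.
 */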
6735
6736 /* Disable IRQ0 */
6737 void
6738 i40e_pf_disable_irq0(struct i40e_hw *hw)
6739 {
6740         /* Disable all interrupt types */
6741         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6742                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6743         I40E_WRITE_FLUSH(hw);
6744 }
6745
6746 /* Enable IRQ0 */
6747 void
6748 i40e_pf_enable_irq0(struct i40e_hw *hw)
6749 {
6750         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6751                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6752                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6753                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6754         I40E_WRITE_FLUSH(hw);
6755 }
6756
6757 static void
6758 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6759 {
6760         /* read pending request and disable first */
6761         i40e_pf_disable_irq0(hw);
6762         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6763         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6764                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6765
6766         if (no_queue)
6767                 /* Link no queues with irq0 */
6768                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6769                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6770 }
6771
6772 static void
6773 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6774 {
6775         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6776         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6777         int i;
6778         uint16_t abs_vf_id;
6779         uint32_t index, offset, val;
6780
6781         if (!pf->vfs)
6782                 return;
6783         /**
6784          * Try to find which VF triggered a reset; use the absolute VF id
6785          * to access it, since the register is a global one.
6786          */
6787         for (i = 0; i < pf->vf_num; i++) {
6788                 abs_vf_id = hw->func_caps.vf_base_id + i;
6789                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6790                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6791                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6792                 /* VFR event occurred */
6793                 if (val & (0x1 << offset)) {
6794                         int ret;
6795
6796                         /* Clear the event first */
6797                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6798                                                         (0x1 << offset));
6799                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6800                         /**
6801                          * Only notify that a VF reset event occurred;
6802                          * don't trigger another SW reset.
6803                          */
6804                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6805                         if (ret != I40E_SUCCESS)
6806                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6807                 }
6808         }
6809 }
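/*
 * Editor's note: the VFLRSTAT indexing above, spelled out with
 * illustrative values and assuming I40E_UINT32_BIT_SIZE == 32. For
 * abs_vf_id = 40: index = 40 / 32 = 1 and offset = 40 % 32 = 8, so the
 * reset of that VF is reported by bit 8 of I40E_GLGEN_VFLRSTAT(1).
 */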
6810
6811 static void
6812 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6813 {
6814         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6815         int i;
6816
6817         for (i = 0; i < pf->vf_num; i++)
6818                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6819 }
6820
6821 static void
6822 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6823 {
6824         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6825         struct i40e_arq_event_info info;
6826         uint16_t pending, opcode;
6827         int ret;
6828
6829         info.buf_len = I40E_AQ_BUF_SZ;
6830         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6831         if (!info.msg_buf) {
6832                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6833                 return;
6834         }
6835
6836         pending = 1;
6837         while (pending) {
6838                 ret = i40e_clean_arq_element(hw, &info, &pending);
6839
6840                 if (ret != I40E_SUCCESS) {
6841                         PMD_DRV_LOG(INFO,
6842                                 "Failed to read msg from AdminQ, aq_err: %u",
6843                                 hw->aq.asq_last_status);
6844                         break;
6845                 }
6846                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6847
6848                 switch (opcode) {
6849                 case i40e_aqc_opc_send_msg_to_pf:
6850                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6851                         i40e_pf_host_handle_vf_msg(dev,
6852                                         rte_le_to_cpu_16(info.desc.retval),
6853                                         rte_le_to_cpu_32(info.desc.cookie_high),
6854                                         rte_le_to_cpu_32(info.desc.cookie_low),
6855                                         info.msg_buf,
6856                                         info.msg_len);
6857                         break;
6858                 case i40e_aqc_opc_get_link_status:
6859                         ret = i40e_dev_link_update(dev, 0);
6860                         if (!ret)
6861                                 rte_eth_dev_callback_process(dev,
6862                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6863                         break;
6864                 default:
6865                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6866                                     opcode);
6867                         break;
6868                 }
6869         }
6870         rte_free(info.msg_buf);
6871 }
6872
6873 static void
6874 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6875 {
6876 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6877 #define I40E_MDD_CLEAR16 0xFFFF
6878         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6879         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6880         bool mdd_detected = false;
6881         struct i40e_pf_vf *vf;
6882         uint32_t reg;
6883         int i;
6884
6885         /* find what triggered the MDD event */
6886         reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6887         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6888                 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6889                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6890                 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6891                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6892                 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6893                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6894                 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6895                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6896                                         hw->func_caps.base_queue;
6897                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6898                         "queue %d PF number 0x%02x VF number 0x%02x device %s",
6899                                 event, queue, pf_num, vf_num, dev->data->name);
6900                 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6901                 mdd_detected = true;
6902         }
6903         reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6904         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6905                 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6906                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6907                 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6908                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6909                 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6910                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6911                                         hw->func_caps.base_queue;
6912
6913                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6914                                 "queue %d of function 0x%02x device %s",
6915                                         event, queue, func, dev->data->name);
6916                 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6917                 mdd_detected = true;
6918         }
6919
6920         if (mdd_detected) {
6921                 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6922                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6923                         I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6924                         PMD_DRV_LOG(WARNING, "TX driver issue detected on PF");
6925                 }
6926                 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6927                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6928                         I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6929                                         I40E_MDD_CLEAR16);
6930                         PMD_DRV_LOG(WARNING, "RX driver issue detected on PF");
6931                 }
6932         }
6933
6934         /* see if one of the VFs needs its hand slapped */
6935         for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6936                 vf = &pf->vfs[i];
6937                 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6938                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6939                         I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6940                                         I40E_MDD_CLEAR16);
6941                         vf->num_mdd_events++;
6942                         PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
6943                                         PRIu64 " times",
6944                                         i, vf->num_mdd_events);
6945                 }
6946
6947                 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6948                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6949                         I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6950                                         I40E_MDD_CLEAR16);
6951                         vf->num_mdd_events++;
6952                         PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
6953                                         PRIu64 " times",
6954                                         i, vf->num_mdd_events);
6955                 }
6956         }
6957 }
6958
6959 /**
6960  * Interrupt handler triggered by the NIC for handling
6961  * specific interrupts.
6962  *
6963  * @param handle
6964  *  Pointer to interrupt handle.
6965  * @param param
6966  *  The address of the parameter (struct rte_eth_dev *) registered before.
6967  *
6968  * @return
6969  *  void
6970  */
6971 static void
6972 i40e_dev_interrupt_handler(void *param)
6973 {
6974         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6975         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6976         uint32_t icr0;
6977
6978         /* Disable interrupt */
6979         i40e_pf_disable_irq0(hw);
6980
6981         /* read out interrupt causes */
6982         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6983
6984         /* No interrupt event indicated */
6985         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6986                 PMD_DRV_LOG(INFO, "No interrupt event");
6987                 goto done;
6988         }
6989         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6990                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6991         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6992                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6993                 i40e_handle_mdd_event(dev);
6994         }
6995         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6996                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6997         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6998                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6999         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
7000                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7001         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7002                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
7003         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7004                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7005
7006         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7007                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7008                 i40e_dev_handle_vfr_event(dev);
7009         }
7010         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7011                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
7012                 i40e_dev_handle_aq_msg(dev);
7013         }
7014
7015 done:
7016         /* Enable interrupt */
7017         i40e_pf_enable_irq0(hw);
7018 }
7019
7020 static void
7021 i40e_dev_alarm_handler(void *param)
7022 {
7023         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
7024         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7025         uint32_t icr0;
7026
7027         /* Disable interrupt */
7028         i40e_pf_disable_irq0(hw);
7029
7030         /* read out interrupt causes */
7031         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
7032
7033         /* No interrupt event indicated */
7034         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
7035                 goto done;
7036         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
7037                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
7038         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
7039                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
7040                 i40e_handle_mdd_event(dev);
7041         }
7042         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
7043                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
7044         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
7045                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
7046         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
7047                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7048         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7049                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
7050         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7051                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7052
7053         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7054                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7055                 i40e_dev_handle_vfr_event(dev);
7056         }
7057         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7058                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
7059                 i40e_dev_handle_aq_msg(dev);
7060         }
7061
7062 done:
7063         /* Enable interrupt */
7064         i40e_pf_enable_irq0(hw);
7065         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
7066                           i40e_dev_alarm_handler, dev);
7067 }
7068
7069 int
7070 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
7071                          struct i40e_macvlan_filter *filter,
7072                          int total)
7073 {
7074         int ele_num, ele_buff_size;
7075         int num, actual_num, i;
7076         uint16_t flags;
7077         int ret = I40E_SUCCESS;
7078         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7079         struct i40e_aqc_add_macvlan_element_data *req_list;
7080
7081         if (filter == NULL || total == 0)
7082                 return I40E_ERR_PARAM;
7083         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7084         ele_buff_size = hw->aq.asq_buf_size;
7085
7086         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
7087         if (req_list == NULL) {
7088                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
7089                 return I40E_ERR_NO_MEMORY;
7090         }
7091
7092         num = 0;
7093         do {
7094                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7095                 memset(req_list, 0, ele_buff_size);
7096
7097                 for (i = 0; i < actual_num; i++) {
7098                         rte_memcpy(req_list[i].mac_addr,
7099                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7100                         req_list[i].vlan_tag =
7101                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7102
7103                         switch (filter[num + i].filter_type) {
7104                         case RTE_MAC_PERFECT_MATCH:
7105                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
7106                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7107                                 break;
7108                         case RTE_MACVLAN_PERFECT_MATCH:
7109                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7110                                 break;
7111                         case RTE_MAC_HASH_MATCH:
7112                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
7113                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7114                                 break;
7115                         case RTE_MACVLAN_HASH_MATCH:
7116                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7117                                 break;
7118                         default:
7119                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
7120                                 ret = I40E_ERR_PARAM;
7121                                 goto DONE;
7122                         }
7123
7124                         req_list[i].queue_number = 0;
7125
7126                         req_list[i].flags = rte_cpu_to_le_16(flags);
7127                 }
7128
7129                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7130                                                 actual_num, NULL);
7131                 if (ret != I40E_SUCCESS) {
7132                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7133                         goto DONE;
7134                 }
7135                 num += actual_num;
7136         } while (num < total);
7137
7138 DONE:
7139         rte_free(req_list);
7140         return ret;
7141 }
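/*
 * Editor's note: a minimal, hypothetical caller of the batched helper
 * above. The caller only builds a flat array of i40e_macvlan_filter
 * entries; i40e_add_macvlan_filters() splits it internally into
 * admin-queue-sized chunks of ele_num elements per command.
 */
static inline int
i40e_add_one_macvlan_sketch(struct i40e_vsi *vsi,
			    const struct rte_ether_addr *mac, uint16_t vlan)
{
	struct i40e_macvlan_filter f;

	memset(&f, 0, sizeof(f));
	rte_memcpy(&f.macaddr, mac, ETH_ADDR_LEN);
	f.vlan_id = vlan;
	f.filter_type = RTE_MACVLAN_PERFECT_MATCH;

	return i40e_add_macvlan_filters(vsi, &f, 1);
}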
7142
7143 int
7144 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7145                             struct i40e_macvlan_filter *filter,
7146                             int total)
7147 {
7148         int ele_num, ele_buff_size;
7149         int num, actual_num, i;
7150         uint16_t flags;
7151         int ret = I40E_SUCCESS;
7152         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7153         struct i40e_aqc_remove_macvlan_element_data *req_list;
7154
7155         if (filter == NULL || total == 0)
7156                 return I40E_ERR_PARAM;
7157
7158         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7159         ele_buff_size = hw->aq.asq_buf_size;
7160
7161         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7162         if (req_list == NULL) {
7163                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
7164                 return I40E_ERR_NO_MEMORY;
7165         }
7166
7167         num = 0;
7168         do {
7169                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7170                 memset(req_list, 0, ele_buff_size);
7171
7172                 for (i = 0; i < actual_num; i++) {
7173                         rte_memcpy(req_list[i].mac_addr,
7174                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7175                         req_list[i].vlan_tag =
7176                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7177
7178                         switch (filter[num + i].filter_type) {
7179                         case RTE_MAC_PERFECT_MATCH:
7180                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7181                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7182                                 break;
7183                         case RTE_MACVLAN_PERFECT_MATCH:
7184                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7185                                 break;
7186                         case RTE_MAC_HASH_MATCH:
7187                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7188                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7189                                 break;
7190                         case RTE_MACVLAN_HASH_MATCH:
7191                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7192                                 break;
7193                         default:
7194                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7195                                 ret = I40E_ERR_PARAM;
7196                                 goto DONE;
7197                         }
7198                         req_list[i].flags = rte_cpu_to_le_16(flags);
7199                 }
7200
7201                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7202                                                 actual_num, NULL);
7203                 if (ret != I40E_SUCCESS) {
7204                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7205                         goto DONE;
7206                 }
7207                 num += actual_num;
7208         } while (num < total);
7209
7210 DONE:
7211         rte_free(req_list);
7212         return ret;
7213 }
7214
7215 /* Find the specified MAC filter */
7216 static struct i40e_mac_filter *
7217 i40e_find_mac_filter(struct i40e_vsi *vsi,
7218                          struct rte_ether_addr *macaddr)
7219 {
7220         struct i40e_mac_filter *f;
7221
7222         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7223                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7224                         return f;
7225         }
7226
7227         return NULL;
7228 }
7229
7230 static bool
7231 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7232                          uint16_t vlan_id)
7233 {
7234         uint32_t vid_idx, vid_bit;
7235
7236         if (vlan_id > ETH_VLAN_ID_MAX)
7237                 return 0;
7238
7239         vid_idx = I40E_VFTA_IDX(vlan_id);
7240         vid_bit = I40E_VFTA_BIT(vlan_id);
7241
7242         if (vsi->vfta[vid_idx] & vid_bit)
7243                 return 1;
7244         else
7245                 return 0;
7246 }
7247
7248 static void
7249 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7250                        uint16_t vlan_id, bool on)
7251 {
7252         uint32_t vid_idx, vid_bit;
7253
7254         vid_idx = I40E_VFTA_IDX(vlan_id);
7255         vid_bit = I40E_VFTA_BIT(vlan_id);
7256
7257         if (on)
7258                 vsi->vfta[vid_idx] |= vid_bit;
7259         else
7260                 vsi->vfta[vid_idx] &= ~vid_bit;
7261 }
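/*
 * Editor's note: the VFTA bitmap math above, spelled out with an
 * illustrative value and assuming the usual I40E_VFTA_IDX/I40E_VFTA_BIT
 * layout (vlan_id >> 5 and 1 << (vlan_id & 0x1F)). For vlan_id = 100:
 * vid_idx = 3 and vid_bit = 1 << 4, so vlan 100 lives in bit 4 of
 * vsi->vfta[3].
 */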
7262
7263 void
7264 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7265                      uint16_t vlan_id, bool on)
7266 {
7267         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7268         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7269         int ret;
7270
7271         if (vlan_id > ETH_VLAN_ID_MAX)
7272                 return;
7273
7274         i40e_store_vlan_filter(vsi, vlan_id, on);
7275
7276         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7277                 return;
7278
7279         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7280
7281         if (on) {
7282                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7283                                        &vlan_data, 1, NULL);
7284                 if (ret != I40E_SUCCESS)
7285                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7286         } else {
7287                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7288                                           &vlan_data, 1, NULL);
7289                 if (ret != I40E_SUCCESS)
7290                         PMD_DRV_LOG(ERR,
7291                                     "Failed to remove vlan filter");
7292         }
7293 }
7294
7295 /**
7296  * Find all vlan options for a specific mac addr,
7297  * returning the vlans actually found.
7298  */
7299 int
7300 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7301                            struct i40e_macvlan_filter *mv_f,
7302                            int num, struct rte_ether_addr *addr)
7303 {
7304         int i;
7305         uint32_t j, k;
7306
7307         /**
7308          * Avoid calling i40e_find_vlan_filter() to reduce the loop time,
7309          * even though it makes the code look more complex.
7310          */
7311         if (num < vsi->vlan_num)
7312                 return I40E_ERR_PARAM;
7313
7314         i = 0;
7315         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7316                 if (vsi->vfta[j]) {
7317                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7318                                 if (vsi->vfta[j] & (1 << k)) {
7319                                         if (i > num - 1) {
7320                                                 PMD_DRV_LOG(ERR,
7321                                                         "vlan number doesn't match");
7322                                                 return I40E_ERR_PARAM;
7323                                         }
7324                                         rte_memcpy(&mv_f[i].macaddr,
7325                                                         addr, ETH_ADDR_LEN);
7326                                         mv_f[i].vlan_id =
7327                                                 j * I40E_UINT32_BIT_SIZE + k;
7328                                         i++;
7329                                 }
7330                         }
7331                 }
7332         }
7333         return I40E_SUCCESS;
7334 }
7335
7336 static inline int
7337 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7338                            struct i40e_macvlan_filter *mv_f,
7339                            int num,
7340                            uint16_t vlan)
7341 {
7342         int i = 0;
7343         struct i40e_mac_filter *f;
7344
7345         if (num < vsi->mac_num)
7346                 return I40E_ERR_PARAM;
7347
7348         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7349                 if (i > num - 1) {
7350                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
7351                         return I40E_ERR_PARAM;
7352                 }
7353                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7354                                 ETH_ADDR_LEN);
7355                 mv_f[i].vlan_id = vlan;
7356                 mv_f[i].filter_type = f->mac_info.filter_type;
7357                 i++;
7358         }
7359
7360         return I40E_SUCCESS;
7361 }
7362
7363 static int
7364 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7365 {
7366         int i, j, num;
7367         struct i40e_mac_filter *f;
7368         struct i40e_macvlan_filter *mv_f;
7369         int ret = I40E_SUCCESS;
7370
7371         if (vsi == NULL || vsi->mac_num == 0)
7372                 return I40E_ERR_PARAM;
7373
7374         /* Case that no vlan is set */
7375         if (vsi->vlan_num == 0)
7376                 num = vsi->mac_num;
7377         else
7378                 num = vsi->mac_num * vsi->vlan_num;
7379
7380         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7381         if (mv_f == NULL) {
7382                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7383                 return I40E_ERR_NO_MEMORY;
7384         }
7385
7386         i = 0;
7387         if (vsi->vlan_num == 0) {
7388                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7389                         rte_memcpy(&mv_f[i].macaddr,
7390                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7391                         mv_f[i].filter_type = f->mac_info.filter_type;
7392                         mv_f[i].vlan_id = 0;
7393                         i++;
7394                 }
7395         } else {
7396                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7397                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7398                                         vsi->vlan_num, &f->mac_info.mac_addr);
7399                         if (ret != I40E_SUCCESS)
7400                                 goto DONE;
7401                         for (j = i; j < i + vsi->vlan_num; j++)
7402                                 mv_f[j].filter_type = f->mac_info.filter_type;
7403                         i += vsi->vlan_num;
7404                 }
7405         }
7406
7407         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7408 DONE:
7409         rte_free(mv_f);
7410
7411         return ret;
7412 }
7413
7414 int
7415 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7416 {
7417         struct i40e_macvlan_filter *mv_f;
7418         int mac_num;
7419         int ret = I40E_SUCCESS;
7420
7421         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7422                 return I40E_ERR_PARAM;
7423
7424         /* If it's already set, just return */
7425         if (i40e_find_vlan_filter(vsi, vlan))
7426                 return I40E_SUCCESS;
7427
7428         mac_num = vsi->mac_num;
7429
7430         if (mac_num == 0) {
7431                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7432                 return I40E_ERR_PARAM;
7433         }
7434
7435         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7436
7437         if (mv_f == NULL) {
7438                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7439                 return I40E_ERR_NO_MEMORY;
7440         }
7441
7442         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7443
7444         if (ret != I40E_SUCCESS)
7445                 goto DONE;
7446
7447         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7448
7449         if (ret != I40E_SUCCESS)
7450                 goto DONE;
7451
7452         i40e_set_vlan_filter(vsi, vlan, 1);
7453
7454         vsi->vlan_num++;
7455         ret = I40E_SUCCESS;
7456 DONE:
7457         rte_free(mv_f);
7458         return ret;
7459 }
7460
7461 int
7462 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7463 {
7464         struct i40e_macvlan_filter *mv_f;
7465         int mac_num;
7466         int ret = I40E_SUCCESS;
7467
7468         /**
7469          * Vlan 0 is the generic filter for untagged packets
7470          * and can't be removed.
7471          */
7472         if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7473                 return I40E_ERR_PARAM;
7474
7475         /* If it can't be found, just return */
7476         if (!i40e_find_vlan_filter(vsi, vlan))
7477                 return I40E_ERR_PARAM;
7478
7479         mac_num = vsi->mac_num;
7480
7481         if (mac_num == 0) {
7482                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7483                 return I40E_ERR_PARAM;
7484         }
7485
7486         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7487
7488         if (mv_f == NULL) {
7489                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7490                 return I40E_ERR_NO_MEMORY;
7491         }
7492
7493         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7494
7495         if (ret != I40E_SUCCESS)
7496                 goto DONE;
7497
7498         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7499
7500         if (ret != I40E_SUCCESS)
7501                 goto DONE;
7502
7503         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
7504         if (vsi->vlan_num == 1) {
7505                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7506                 if (ret != I40E_SUCCESS)
7507                         goto DONE;
7508
7509                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7510                 if (ret != I40E_SUCCESS)
7511                         goto DONE;
7512         }
7513
7514         i40e_set_vlan_filter(vsi, vlan, 0);
7515
7516         vsi->vlan_num--;
7517         ret = I40E_SUCCESS;
7518 DONE:
7519         rte_free(mv_f);
7520         return ret;
7521 }
7522
7523 int
7524 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7525 {
7526         struct i40e_mac_filter *f;
7527         struct i40e_macvlan_filter *mv_f;
7528         int i, vlan_num = 0;
7529         int ret = I40E_SUCCESS;
7530
7531         /* If it has already been configured, just return */
7532         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7533         if (f != NULL)
7534                 return I40E_SUCCESS;
7535         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
7536                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
7537
7538                 /**
7539                  * If vlan_num is 0, this is the first time to add a mac;
7540                  * set the mask for vlan_id 0.
7541                  */
7542                 if (vsi->vlan_num == 0) {
7543                         i40e_set_vlan_filter(vsi, 0, 1);
7544                         vsi->vlan_num = 1;
7545                 }
7546                 vlan_num = vsi->vlan_num;
7547         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
7548                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
7549                 vlan_num = 1;
7550
7551         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7552         if (mv_f == NULL) {
7553                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7554                 return I40E_ERR_NO_MEMORY;
7555         }
7556
7557         for (i = 0; i < vlan_num; i++) {
7558                 mv_f[i].filter_type = mac_filter->filter_type;
7559                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7560                                 ETH_ADDR_LEN);
7561         }
7562
7563         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7564                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
7565                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7566                                         &mac_filter->mac_addr);
7567                 if (ret != I40E_SUCCESS)
7568                         goto DONE;
7569         }
7570
7571         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7572         if (ret != I40E_SUCCESS)
7573                 goto DONE;
7574
7575         /* Add the mac addr into mac list */
7576         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7577         if (f == NULL) {
7578                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7579                 ret = I40E_ERR_NO_MEMORY;
7580                 goto DONE;
7581         }
7582         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7583                         ETH_ADDR_LEN);
7584         f->mac_info.filter_type = mac_filter->filter_type;
7585         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7586         vsi->mac_num++;
7587
7588         ret = I40E_SUCCESS;
7589 DONE:
7590         rte_free(mv_f);
7591
7592         return ret;
7593 }
7594
7595 int
7596 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7597 {
7598         struct i40e_mac_filter *f;
7599         struct i40e_macvlan_filter *mv_f;
7600         int i, vlan_num;
7601         enum rte_mac_filter_type filter_type;
7602         int ret = I40E_SUCCESS;
7603
7604         /* Can't find it, return an error */
7605         f = i40e_find_mac_filter(vsi, addr);
7606         if (f == NULL)
7607                 return I40E_ERR_PARAM;
7608
7609         vlan_num = vsi->vlan_num;
7610         filter_type = f->mac_info.filter_type;
7611         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7612                 filter_type == RTE_MACVLAN_HASH_MATCH) {
7613                 if (vlan_num == 0) {
7614                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7615                         return I40E_ERR_PARAM;
7616                 }
7617         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7618                         filter_type == RTE_MAC_HASH_MATCH)
7619                 vlan_num = 1;
7620
7621         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7622         if (mv_f == NULL) {
7623                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7624                 return I40E_ERR_NO_MEMORY;
7625         }
7626
7627         for (i = 0; i < vlan_num; i++) {
7628                 mv_f[i].filter_type = filter_type;
7629                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7630                                 ETH_ADDR_LEN);
7631         }
7632         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7633                         filter_type == RTE_MACVLAN_HASH_MATCH) {
7634                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7635                 if (ret != I40E_SUCCESS)
7636                         goto DONE;
7637         }
7638
7639         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7640         if (ret != I40E_SUCCESS)
7641                 goto DONE;
7642
7643         /* Remove the mac addr from the mac list */
7644         TAILQ_REMOVE(&vsi->mac_list, f, next);
7645         rte_free(f);
7646         vsi->mac_num--;
7647
7648         ret = I40E_SUCCESS;
7649 DONE:
7650         rte_free(mv_f);
7651         return ret;
7652 }
7653
7654 /* Configure hash enable flags for RSS */
7655 uint64_t
7656 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7657 {
7658         uint64_t hena = 0;
7659         int i;
7660
7661         if (!flags)
7662                 return hena;
7663
7664         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7665                 if (flags & (1ULL << i))
7666                         hena |= adapter->pctypes_tbl[i];
7667         }
7668
7669         return hena;
7670 }
7671
7672 /* Parse the hash enable flags */
7673 uint64_t
7674 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7675 {
7676         uint64_t rss_hf = 0;
7677         int i;
7678
7679         if (!flags)
7680                 return rss_hf;
7681
7682         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7683                 if (flags & adapter->pctypes_tbl[i])
7684                         rss_hf |= (1ULL << i);
7685         }
7686         return rss_hf;
7687 }
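/*
 * Editor's note: a minimal, hypothetical helper showing how the two
 * translations above are used together with the HENA registers: flow-type
 * bits from the ethdev API are mapped to pctype bits before being written
 * to hardware, mirroring what i40e_hw_rss_hash_set() does further below.
 */
static inline void
i40e_enable_ipv4_tcp_udp_rss_sketch(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t rss_hf = (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) |
			  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
	uint64_t hena = i40e_config_hena(pf->adapter, rss_hf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);
}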
7688
7689 /* Disable RSS */
7690 static void
7691 i40e_pf_disable_rss(struct i40e_pf *pf)
7692 {
7693         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7694
7695         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7696         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7697         I40E_WRITE_FLUSH(hw);
7698 }
7699
7700 int
7701 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7702 {
7703         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7704         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7705         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7706                            I40E_VFQF_HKEY_MAX_INDEX :
7707                            I40E_PFQF_HKEY_MAX_INDEX;
7708         int ret = 0;
7709
7710         if (!key || key_len == 0) {
7711                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7712                 return 0;
7713         } else if (key_len != (key_idx + 1) *
7714                 sizeof(uint32_t)) {
7715                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7716                 return -EINVAL;
7717         }
7718
7719         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7720                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7721                         (struct i40e_aqc_get_set_rss_key_data *)key;
7722
7723                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7724                 if (ret)
7725                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7726         } else {
7727                 uint32_t *hash_key = (uint32_t *)key;
7728                 uint16_t i;
7729
7730                 if (vsi->type == I40E_VSI_SRIOV) {
7731                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7732                                 I40E_WRITE_REG(
7733                                         hw,
7734                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7735                                         hash_key[i]);
7736
7737                 } else {
7738                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7739                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7740                                                hash_key[i]);
7741                 }
7742                 I40E_WRITE_FLUSH(hw);
7743         }
7744
7745         return ret;
7746 }
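/*
 * Editor's note: a minimal, hypothetical caller of i40e_set_rss_key() for
 * a non-SRIOV VSI, where the expected key length works out to
 * (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes. A real
 * deployment should use random key bytes instead of a fixed pattern.
 */
static inline int
i40e_set_default_rss_key_sketch(struct i40e_vsi *vsi)
{
	uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];

	memset(key, 0x6d, sizeof(key));
	return i40e_set_rss_key(vsi, key, sizeof(key));
}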
7747
7748 static int
7749 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7750 {
7751         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7752         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7753         uint32_t reg;
7754         int ret;
7755
7756         if (!key || !key_len)
7757                 return 0;
7758
7759         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7760                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7761                         (struct i40e_aqc_get_set_rss_key_data *)key);
7762                 if (ret) {
7763                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7764                         return ret;
7765                 }
7766         } else {
7767                 uint32_t *key_dw = (uint32_t *)key;
7768                 uint16_t i;
7769
7770                 if (vsi->type == I40E_VSI_SRIOV) {
7771                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7772                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7773                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7774                         }
7775                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7776                                    sizeof(uint32_t);
7777                 } else {
7778                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7779                                 reg = I40E_PFQF_HKEY(i);
7780                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7781                         }
7782                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7783                                    sizeof(uint32_t);
7784                 }
7785         }
7786         return 0;
7787 }
7788
7789 static int
7790 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7791 {
7792         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7793         uint64_t hena;
7794         int ret;
7795
7796         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7797                                rss_conf->rss_key_len);
7798         if (ret)
7799                 return ret;
7800
7801         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7802         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7803         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7804         I40E_WRITE_FLUSH(hw);
7805
7806         return 0;
7807 }
7808
7809 static int
7810 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7811                          struct rte_eth_rss_conf *rss_conf)
7812 {
7813         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7814         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7815         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7816         uint64_t hena;
7817
7818         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7819         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7820
7821         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7822                 if (rss_hf != 0) /* Enable RSS */
7823                         return -EINVAL;
7824                 return 0; /* Nothing to do */
7825         }
7826         /* RSS enabled */
7827         if (rss_hf == 0) /* Disable RSS */
7828                 return -EINVAL;
7829
7830         return i40e_hw_rss_hash_set(pf, rss_conf);
7831 }
7832
7833 static int
7834 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7835                            struct rte_eth_rss_conf *rss_conf)
7836 {
7837         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7838         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7839         uint64_t hena;
7840         int ret;
7841
7842         if (!rss_conf)
7843                 return -EINVAL;
7844
7845         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7846                          &rss_conf->rss_key_len);
7847         if (ret)
7848                 return ret;
7849
7850         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7851         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7852         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7853
7854         return 0;
7855 }
7856
7857 static int
7858 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7859 {
7860         switch (filter_type) {
7861         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7862                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7863                 break;
7864         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7865                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7866                 break;
7867         case RTE_TUNNEL_FILTER_IMAC_TENID:
7868                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7869                 break;
7870         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7871                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7872                 break;
7873         case ETH_TUNNEL_FILTER_IMAC:
7874                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7875                 break;
7876         case ETH_TUNNEL_FILTER_OIP:
7877                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7878                 break;
7879         case ETH_TUNNEL_FILTER_IIP:
7880                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7881                 break;
7882         default:
7883                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7884                 return -EINVAL;
7885         }
7886
7887         return 0;
7888 }
7889
7890 /* Convert tunnel filter structure */
7891 static int
7892 i40e_tunnel_filter_convert(
7893         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7894         struct i40e_tunnel_filter *tunnel_filter)
7895 {
7896         rte_ether_addr_copy((struct rte_ether_addr *)
7897                         &cld_filter->element.outer_mac,
7898                 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7899         rte_ether_addr_copy((struct rte_ether_addr *)
7900                         &cld_filter->element.inner_mac,
7901                 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7902         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7903         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7904              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7905             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7906                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7907         else
7908                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7909         tunnel_filter->input.flags = cld_filter->element.flags;
7910         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7911         tunnel_filter->queue = cld_filter->element.queue_number;
7912         rte_memcpy(tunnel_filter->input.general_fields,
7913                    cld_filter->general_fields,
7914                    sizeof(cld_filter->general_fields));
7915
7916         return 0;
7917 }
7918
7919 /* Check if the tunnel filter already exists in the SW list */
7920 struct i40e_tunnel_filter *
7921 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7922                              const struct i40e_tunnel_filter_input *input)
7923 {
7924         int ret;
7925
7926         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7927         if (ret < 0)
7928                 return NULL;
7929
7930         return tunnel_rule->hash_map[ret];
7931 }
7932
7933 /* Add a tunnel filter into the SW list */
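/* Note: on success rte_hash_add_key() returns the slot index of the
 * inserted key, which is reused below as the index into hash_map.
 */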
7934 static int
7935 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7936                              struct i40e_tunnel_filter *tunnel_filter)
7937 {
7938         struct i40e_tunnel_rule *rule = &pf->tunnel;
7939         int ret;
7940
7941         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7942         if (ret < 0) {
7943                 PMD_DRV_LOG(ERR,
7944                     "Failed to insert tunnel filter into hash table, error %d!",
7945                             ret);
7946                 return ret;
7947         }
7948         rule->hash_map[ret] = tunnel_filter;
7949
7950         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7951
7952         return 0;
7953 }
7954
7955 /* Delete a tunnel filter from the SW list */
7956 int
7957 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7958                           struct i40e_tunnel_filter_input *input)
7959 {
7960         struct i40e_tunnel_rule *rule = &pf->tunnel;
7961         struct i40e_tunnel_filter *tunnel_filter;
7962         int ret;
7963
7964         ret = rte_hash_del_key(rule->hash_table, input);
7965         if (ret < 0) {
7966                 PMD_DRV_LOG(ERR,
7967                     "Failed to delete tunnel filter from hash table, error %d!",
7968                             ret);
7969                 return ret;
7970         }
7971         tunnel_filter = rule->hash_map[ret];
7972         rule->hash_map[ret] = NULL;
7973
7974         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7975         rte_free(tunnel_filter);
7976
7977         return 0;
7978 }
7979
7980 int
7981 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7982                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7983                         uint8_t add)
7984 {
7985         uint16_t ip_type;
7986         uint32_t ipv4_addr, ipv4_addr_le;
7987         uint8_t i, tun_type = 0;
7988         /* internal variable to convert ipv6 byte order */
7989         uint32_t convert_ipv6[4];
7990         int val, ret = 0;
7991         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7992         struct i40e_vsi *vsi = pf->main_vsi;
7993         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
7994         struct i40e_aqc_cloud_filters_element_bb *pfilter;
7995         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7996         struct i40e_tunnel_filter *tunnel, *node;
7997         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7998
7999         cld_filter = rte_zmalloc("tunnel_filter",
8000                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8001                          0);
8002
8003         if (cld_filter == NULL) {
8004                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8005                 return -ENOMEM;
8006         }
8007         pfilter = cld_filter;
8008
8009         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8010                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8011         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8012                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8013
8014         pfilter->element.inner_vlan =
8015                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8016         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
8017                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8018                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8019                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8020                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8021                                 &ipv4_addr_le,
8022                                 sizeof(pfilter->element.ipaddr.v4.data));
8023         } else {
8024                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8025                 for (i = 0; i < 4; i++) {
8026                         convert_ipv6[i] =
8027                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
8028                 }
8029                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8030                            &convert_ipv6,
8031                            sizeof(pfilter->element.ipaddr.v6.data));
8032         }
8033
8034         /* check tunnel type */
8035         switch (tunnel_filter->tunnel_type) {
8036         case RTE_TUNNEL_TYPE_VXLAN:
8037                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8038                 break;
8039         case RTE_TUNNEL_TYPE_NVGRE:
8040                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8041                 break;
8042         case RTE_TUNNEL_TYPE_IP_IN_GRE:
8043                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8044                 break;
8045         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8046                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
8047                 break;
8048         default:
8049                 /* Other tunnel types are not supported. */
8050                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8051                 rte_free(cld_filter);
8052                 return -EINVAL;
8053         }
8054
8055         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8056                                        &pfilter->element.flags);
8057         if (val < 0) {
8058                 rte_free(cld_filter);
8059                 return -EINVAL;
8060         }
8061
8062         pfilter->element.flags |= rte_cpu_to_le_16(
8063                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8064                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8065         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8066         pfilter->element.queue_number =
8067                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8068
8069         /* Check if the filter already exists in the SW list */
8070         memset(&check_filter, 0, sizeof(check_filter));
8071         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8072         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8073         if (add && node) {
8074                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8075                 rte_free(cld_filter);
8076                 return -EINVAL;
8077         }
8078
8079         if (!add && !node) {
8080                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8081                 rte_free(cld_filter);
8082                 return -EINVAL;
8083         }
8084
8085         if (add) {
8086                 ret = i40e_aq_add_cloud_filters(hw,
8087                                         vsi->seid, &cld_filter->element, 1);
8088                 if (ret < 0) {
8089                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8090                         rte_free(cld_filter);
8091                         return -ENOTSUP;
8092                 }
8093                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8094                 if (tunnel == NULL) {
8095                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8096                         rte_free(cld_filter);
8097                         return -ENOMEM;
8098                 }
8099
8100                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8101                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8102                 if (ret < 0)
8103                         rte_free(tunnel);
8104         } else {
8105                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8106                                                    &cld_filter->element, 1);
8107                 if (ret < 0) {
8108                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8109                         rte_free(cld_filter);
8110                         return -ENOTSUP;
8111                 }
8112                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8113         }
8114
8115         rte_free(cld_filter);
8116         return ret;
8117 }
8118
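/*
 * Field-vector word offsets and L4/tunnel-key masks used by the
 * filter-replace helpers below when reprogramming cloud and L1
 * filters through the admin queue.
 */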
8119 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
8120 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
8121 #define I40E_TR_GENEVE_KEY_MASK                 0x8
8122 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
8123 #define I40E_TR_GRE_KEY_MASK                    0x400
8124 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
8125 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
8126 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
8127 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
8128 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
8129 #define I40E_DIRECTION_INGRESS_KEY              0x8000
8130 #define I40E_TR_L4_TYPE_TCP                     0x2
8131 #define I40E_TR_L4_TYPE_UDP                     0x4
8132 #define I40E_TR_L4_TYPE_SCTP                    0x8
8133
8134 static enum i40e_status_code
8135 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
8136 {
8137         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8138         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8139         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8140         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8141         enum i40e_status_code status = I40E_SUCCESS;
8142
8143         if (pf->support_multi_driver) {
8144                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8145                 return I40E_NOT_SUPPORTED;
8146         }
8147
8148         memset(&filter_replace, 0,
8149                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8150         memset(&filter_replace_buf, 0,
8151                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8152
8153         /* create L1 filter */
8154         filter_replace.old_filter_type =
8155                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8156         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8157         filter_replace.tr_bit = 0;
8158
8159         /* Prepare the buffer, 3 entries */
8160         filter_replace_buf.data[0] =
8161                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8162         filter_replace_buf.data[0] |=
8163                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8164         filter_replace_buf.data[2] = 0xFF;
8165         filter_replace_buf.data[3] = 0xFF;
8166         filter_replace_buf.data[4] =
8167                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8168         filter_replace_buf.data[4] |=
8169                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8170         filter_replace_buf.data[7] = 0xF0;
8171         filter_replace_buf.data[8]
8172                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
8173         filter_replace_buf.data[8] |=
8174                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8175         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
8176                 I40E_TR_GENEVE_KEY_MASK |
8177                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
8178         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
8179                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
8180                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
8181
8182         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8183                                                &filter_replace_buf);
8184         if (!status && (filter_replace.old_filter_type !=
8185                         filter_replace.new_filter_type))
8186                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8187                             " original: 0x%x, new: 0x%x",
8188                             dev->device->name,
8189                             filter_replace.old_filter_type,
8190                             filter_replace.new_filter_type);
8191
8192         return status;
8193 }
8194
8195 static enum i40e_status_code
8196 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
8197 {
8198         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8199         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8200         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8201         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8202         enum i40e_status_code status = I40E_SUCCESS;
8203
8204         if (pf->support_multi_driver) {
8205                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8206                 return I40E_NOT_SUPPORTED;
8207         }
8208
8209         /* For MPLSoUDP */
8210         memset(&filter_replace, 0,
8211                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8212         memset(&filter_replace_buf, 0,
8213                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8214         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8215                 I40E_AQC_MIRROR_CLOUD_FILTER;
8216         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8217         filter_replace.new_filter_type =
8218                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8219         /* Prepare the buffer, 2 entries */
8220         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8221         filter_replace_buf.data[0] |=
8222                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8223         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8224         filter_replace_buf.data[4] |=
8225                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8226         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8227                                                &filter_replace_buf);
8228         if (status < 0)
8229                 return status;
8230         if (filter_replace.old_filter_type !=
8231             filter_replace.new_filter_type)
8232                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8233                             " original: 0x%x, new: 0x%x",
8234                             dev->device->name,
8235                             filter_replace.old_filter_type,
8236                             filter_replace.new_filter_type);
8237
8238         /* For MPLSoGRE */
8239         memset(&filter_replace, 0,
8240                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8241         memset(&filter_replace_buf, 0,
8242                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8243
8244         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8245                 I40E_AQC_MIRROR_CLOUD_FILTER;
8246         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
8247         filter_replace.new_filter_type =
8248                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8249         /* Prepare the buffer, 2 entries */
8250         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8251         filter_replace_buf.data[0] |=
8252                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8253         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8254         filter_replace_buf.data[4] |=
8255                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8256
8257         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8258                                                &filter_replace_buf);
8259         if (!status && (filter_replace.old_filter_type !=
8260                         filter_replace.new_filter_type))
8261                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8262                             " original: 0x%x, new: 0x%x",
8263                             dev->device->name,
8264                             filter_replace.old_filter_type,
8265                             filter_replace.new_filter_type);
8266
8267         return status;
8268 }
8269
8270 static enum i40e_status_code
8271 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8272 {
8273         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8274         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8275         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8276         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8277         enum i40e_status_code status = I40E_SUCCESS;
8278
8279         if (pf->support_multi_driver) {
8280                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8281                 return I40E_NOT_SUPPORTED;
8282         }
8283
8284         /* For GTP-C */
8285         memset(&filter_replace, 0,
8286                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8287         memset(&filter_replace_buf, 0,
8288                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8289         /* create L1 filter */
8290         filter_replace.old_filter_type =
8291                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8292         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8293         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8294                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8295         /* Prepare the buffer, 2 entries */
8296         filter_replace_buf.data[0] =
8297                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8298         filter_replace_buf.data[0] |=
8299                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8300         filter_replace_buf.data[2] = 0xFF;
8301         filter_replace_buf.data[3] = 0xFF;
8302         filter_replace_buf.data[4] =
8303                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8304         filter_replace_buf.data[4] |=
8305                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8306         filter_replace_buf.data[6] = 0xFF;
8307         filter_replace_buf.data[7] = 0xFF;
8308         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8309                                                &filter_replace_buf);
8310         if (status < 0)
8311                 return status;
8312         if (filter_replace.old_filter_type !=
8313             filter_replace.new_filter_type)
8314                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8315                             " original: 0x%x, new: 0x%x",
8316                             dev->device->name,
8317                             filter_replace.old_filter_type,
8318                             filter_replace.new_filter_type);
8319
8320         /* for GTP-U */
8321         memset(&filter_replace, 0,
8322                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8323         memset(&filter_replace_buf, 0,
8324                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8325         /* create L1 filter */
8326         filter_replace.old_filter_type =
8327                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8328         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8329         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8330                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8331         /* Prepare the buffer, 2 entries */
8332         filter_replace_buf.data[0] =
8333                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8334         filter_replace_buf.data[0] |=
8335                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8336         filter_replace_buf.data[2] = 0xFF;
8337         filter_replace_buf.data[3] = 0xFF;
8338         filter_replace_buf.data[4] =
8339                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8340         filter_replace_buf.data[4] |=
8341                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8342         filter_replace_buf.data[6] = 0xFF;
8343         filter_replace_buf.data[7] = 0xFF;
8344
8345         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8346                                                &filter_replace_buf);
8347         if (!status && (filter_replace.old_filter_type !=
8348                         filter_replace.new_filter_type))
8349                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8350                             " original: 0x%x, new: 0x%x",
8351                             dev->device->name,
8352                             filter_replace.old_filter_type,
8353                             filter_replace.new_filter_type);
8354
8355         return status;
8356 }
8357
8358 static enum i40e_status_code
8359 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8360 {
8361         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8362         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8363         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8364         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8365         enum i40e_status_code status = I40E_SUCCESS;
8366
8367         if (pf->support_multi_driver) {
8368                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8369                 return I40E_NOT_SUPPORTED;
8370         }
8371
8372         /* for GTP-C */
8373         memset(&filter_replace, 0,
8374                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8375         memset(&filter_replace_buf, 0,
8376                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8377         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8378         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8379         filter_replace.new_filter_type =
8380                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8381         /* Prepare the buffer, 2 entries */
8382         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8383         filter_replace_buf.data[0] |=
8384                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8385         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8386         filter_replace_buf.data[4] |=
8387                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8388         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8389                                                &filter_replace_buf);
8390         if (status < 0)
8391                 return status;
8392         if (filter_replace.old_filter_type !=
8393             filter_replace.new_filter_type)
8394                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8395                             " original: 0x%x, new: 0x%x",
8396                             dev->device->name,
8397                             filter_replace.old_filter_type,
8398                             filter_replace.new_filter_type);
8399
8400         /* for GTP-U */
8401         memset(&filter_replace, 0,
8402                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8403         memset(&filter_replace_buf, 0,
8404                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8405         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8406         filter_replace.old_filter_type =
8407                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8408         filter_replace.new_filter_type =
8409                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8410         /* Prepare the buffer, 2 entries */
8411         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8412         filter_replace_buf.data[0] |=
8413                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8414         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8415         filter_replace_buf.data[4] |=
8416                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8417
8418         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8419                                                &filter_replace_buf);
8420         if (!status && (filter_replace.old_filter_type !=
8421                         filter_replace.new_filter_type))
8422                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8423                             " original: 0x%x, new: 0x%x",
8424                             dev->device->name,
8425                             filter_replace.old_filter_type,
8426                             filter_replace.new_filter_type);
8427
8428         return status;
8429 }
8430
8431 static enum i40e_status_code
8432 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8433                             enum i40e_l4_port_type l4_port_type)
8434 {
8435         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8436         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8437         enum i40e_status_code status = I40E_SUCCESS;
8438         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8439         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8440
8441         if (pf->support_multi_driver) {
8442                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8443                 return I40E_NOT_SUPPORTED;
8444         }
8445
8446         memset(&filter_replace, 0,
8447                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8448         memset(&filter_replace_buf, 0,
8449                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8450
8451         /* create L1 filter */
8452         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8453                 filter_replace.old_filter_type =
8454                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8455                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8456                 filter_replace_buf.data[8] =
8457                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8458         } else {
8459                 filter_replace.old_filter_type =
8460                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8461                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8462                 filter_replace_buf.data[8] =
8463                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8464         }
8465
8466         filter_replace.tr_bit = 0;
8467         /* Prepare the buffer, 3 entries */
8468         filter_replace_buf.data[0] =
8469                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8470         filter_replace_buf.data[0] |=
8471                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8472         filter_replace_buf.data[2] = 0x00;
8473         filter_replace_buf.data[3] =
8474                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8475         filter_replace_buf.data[4] =
8476                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8477         filter_replace_buf.data[4] |=
8478                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8479         filter_replace_buf.data[5] = 0x00;
8480         filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8481                 I40E_TR_L4_TYPE_TCP |
8482                 I40E_TR_L4_TYPE_SCTP;
8483         filter_replace_buf.data[7] = 0x00;
8484         filter_replace_buf.data[8] |=
8485                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8486         filter_replace_buf.data[9] = 0x00;
8487         filter_replace_buf.data[10] = 0xFF;
8488         filter_replace_buf.data[11] = 0xFF;
8489
8490         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8491                                                &filter_replace_buf);
8492         if (!status && filter_replace.old_filter_type !=
8493             filter_replace.new_filter_type)
8494                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8495                             " original: 0x%x, new: 0x%x",
8496                             dev->device->name,
8497                             filter_replace.old_filter_type,
8498                             filter_replace.new_filter_type);
8499
8500         return status;
8501 }
8502
8503 static enum i40e_status_code
8504 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8505                                enum i40e_l4_port_type l4_port_type)
8506 {
8507         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8508         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8509         enum i40e_status_code status = I40E_SUCCESS;
8510         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8511         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8512
8513         if (pf->support_multi_driver) {
8514                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8515                 return I40E_NOT_SUPPORTED;
8516         }
8517
8518         memset(&filter_replace, 0,
8519                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8520         memset(&filter_replace_buf, 0,
8521                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8522
8523         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8524                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8525                 filter_replace.new_filter_type =
8526                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8527                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8528         } else {
8529                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8530                 filter_replace.new_filter_type =
8531                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8532                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8533         }
8534
8535         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8536         filter_replace.tr_bit = 0;
8537         /* Prepare the buffer, 2 entries */
8538         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8539         filter_replace_buf.data[0] |=
8540                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8541         filter_replace_buf.data[4] |=
8542                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8543         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8544                                                &filter_replace_buf);
8545
8546         if (!status && filter_replace.old_filter_type !=
8547             filter_replace.new_filter_type)
8548                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8549                             " original: 0x%x, new: 0x%x",
8550                             dev->device->name,
8551                             filter_replace.old_filter_type,
8552                             filter_replace.new_filter_type);
8553
8554         return status;
8555 }
8556
8557 int
8558 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8559                       struct i40e_tunnel_filter_conf *tunnel_filter,
8560                       uint8_t add)
8561 {
8562         uint16_t ip_type;
8563         uint32_t ipv4_addr, ipv4_addr_le;
8564         uint8_t i, tun_type = 0;
8565         /* internal variable to convert ipv6 byte order */
8566         uint32_t convert_ipv6[4];
8567         int val, ret = 0;
8568         struct i40e_pf_vf *vf = NULL;
8569         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8570         struct i40e_vsi *vsi;
8571         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8572         struct i40e_aqc_cloud_filters_element_bb *pfilter;
8573         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8574         struct i40e_tunnel_filter *tunnel, *node;
8575         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8576         uint32_t teid_le;
8577         bool big_buffer = 0;
8578
8579         cld_filter = rte_zmalloc("tunnel_filter",
8580                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8581                          0);
8582
8583         if (cld_filter == NULL) {
8584                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8585                 return -ENOMEM;
8586         }
8587         pfilter = cld_filter;
8588
8589         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8590                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8591         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8592                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8593
8594         pfilter->element.inner_vlan =
8595                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8596         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8597                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8598                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8599                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8600                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8601                                 &ipv4_addr_le,
8602                                 sizeof(pfilter->element.ipaddr.v4.data));
8603         } else {
8604                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8605                 for (i = 0; i < 4; i++) {
8606                         convert_ipv6[i] =
8607                         rte_cpu_to_le_32(rte_be_to_cpu_32(
8608                                          tunnel_filter->ip_addr.ipv6_addr[i]));
8609                 }
8610                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8611                            &convert_ipv6,
8612                            sizeof(pfilter->element.ipaddr.v6.data));
8613         }
8614
8615         /* check tunnel type */
8616         switch (tunnel_filter->tunnel_type) {
8617         case I40E_TUNNEL_TYPE_VXLAN:
8618                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8619                 break;
8620         case I40E_TUNNEL_TYPE_NVGRE:
8621                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8622                 break;
8623         case I40E_TUNNEL_TYPE_IP_IN_GRE:
8624                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8625                 break;
8626         case I40E_TUNNEL_TYPE_MPLSoUDP:
8627                 if (!pf->mpls_replace_flag) {
8628                         i40e_replace_mpls_l1_filter(pf);
8629                         i40e_replace_mpls_cloud_filter(pf);
8630                         pf->mpls_replace_flag = 1;
8631                 }
8632                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8633                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8634                         teid_le >> 4;
8635                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8636                         (teid_le & 0xF) << 12;
8637                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8638                         0x40;
8639                 big_buffer = 1;
8640                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8641                 break;
8642         case I40E_TUNNEL_TYPE_MPLSoGRE:
8643                 if (!pf->mpls_replace_flag) {
8644                         i40e_replace_mpls_l1_filter(pf);
8645                         i40e_replace_mpls_cloud_filter(pf);
8646                         pf->mpls_replace_flag = 1;
8647                 }
8648                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8649                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8650                         teid_le >> 4;
8651                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8652                         (teid_le & 0xF) << 12;
8653                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8654                         0x0;
8655                 big_buffer = 1;
8656                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8657                 break;
8658         case I40E_TUNNEL_TYPE_GTPC:
8659                 if (!pf->gtp_replace_flag) {
8660                         i40e_replace_gtp_l1_filter(pf);
8661                         i40e_replace_gtp_cloud_filter(pf);
8662                         pf->gtp_replace_flag = 1;
8663                 }
8664                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8665                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8666                         (teid_le >> 16) & 0xFFFF;
8667                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8668                         teid_le & 0xFFFF;
8669                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8670                         0x0;
8671                 big_buffer = 1;
8672                 break;
8673         case I40E_TUNNEL_TYPE_GTPU:
8674                 if (!pf->gtp_replace_flag) {
8675                         i40e_replace_gtp_l1_filter(pf);
8676                         i40e_replace_gtp_cloud_filter(pf);
8677                         pf->gtp_replace_flag = 1;
8678                 }
8679                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8680                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8681                         (teid_le >> 16) & 0xFFFF;
8682                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8683                         teid_le & 0xFFFF;
8684                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8685                         0x0;
8686                 big_buffer = 1;
8687                 break;
8688         case I40E_TUNNEL_TYPE_QINQ:
8689                 if (!pf->qinq_replace_flag) {
8690                         ret = i40e_cloud_filter_qinq_create(pf);
8691                         if (ret < 0)
8692                                 PMD_DRV_LOG(DEBUG,
8693                                             "QinQ tunnel filter already created.");
8694                         pf->qinq_replace_flag = 1;
8695                 }
8696                 /* Add the outer and inner VLAN values to the
8697                  * general fields. The big buffer format must be
8698                  * used for these entries; see
8699                  * i40e_aq_add_cloud_filters_bb().
8700                  */
8701                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8702                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8703                 big_buffer = 1;
8704                 break;
8705         case I40E_CLOUD_TYPE_UDP:
8706         case I40E_CLOUD_TYPE_TCP:
8707         case I40E_CLOUD_TYPE_SCTP:
8708                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8709                         if (!pf->sport_replace_flag) {
8710                                 i40e_replace_port_l1_filter(pf,
8711                                                 tunnel_filter->l4_port_type);
8712                                 i40e_replace_port_cloud_filter(pf,
8713                                                 tunnel_filter->l4_port_type);
8714                                 pf->sport_replace_flag = 1;
8715                         }
8716                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8717                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8718                                 I40E_DIRECTION_INGRESS_KEY;
8719
8720                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8721                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8722                                         I40E_TR_L4_TYPE_UDP;
8723                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8724                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8725                                         I40E_TR_L4_TYPE_TCP;
8726                         else
8727                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8728                                         I40E_TR_L4_TYPE_SCTP;
8729
8730                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8731                                 (teid_le >> 16) & 0xFFFF;
8732                         big_buffer = 1;
8733                 } else {
8734                         if (!pf->dport_replace_flag) {
8735                                 i40e_replace_port_l1_filter(pf,
8736                                                 tunnel_filter->l4_port_type);
8737                                 i40e_replace_port_cloud_filter(pf,
8738                                                 tunnel_filter->l4_port_type);
8739                                 pf->dport_replace_flag = 1;
8740                         }
8741                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8742                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8743                                 I40E_DIRECTION_INGRESS_KEY;
8744
8745                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8746                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8747                                         I40E_TR_L4_TYPE_UDP;
8748                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8749                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8750                                         I40E_TR_L4_TYPE_TCP;
8751                         else
8752                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8753                                         I40E_TR_L4_TYPE_SCTP;
8754
8755                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8756                                 (teid_le >> 16) & 0xFFFF;
8757                         big_buffer = 1;
8758                 }
8759
8760                 break;
8761         default:
8762                 /* Other tunnel types are not supported. */
8763                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
8764                 rte_free(cld_filter);
8765                 return -EINVAL;
8766         }
8767
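        /* Cloud filter flag values 0X10-0X12 select the custom filter
         * slots installed by the i40e_replace_*_cloud_filter() helpers
         * above; pick the one matching the tunnel type just programmed.
         */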
8768         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8769                 pfilter->element.flags =
8770                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8771         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8772                 pfilter->element.flags =
8773                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8774         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8775                 pfilter->element.flags =
8776                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8777         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8778                 pfilter->element.flags =
8779                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8780         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8781                 pfilter->element.flags |=
8782                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8783         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8784                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8785                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8786                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8787                         pfilter->element.flags |=
8788                                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8789                 else
8790                         pfilter->element.flags |=
8791                                 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8792         } else {
8793                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8794                                                 &pfilter->element.flags);
8795                 if (val < 0) {
8796                         rte_free(cld_filter);
8797                         return -EINVAL;
8798                 }
8799         }
8800
8801         pfilter->element.flags |= rte_cpu_to_le_16(
8802                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8803                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8804         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8805         pfilter->element.queue_number =
8806                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8807
8808         if (!tunnel_filter->is_to_vf)
8809                 vsi = pf->main_vsi;
8810         else {
8811                 if (tunnel_filter->vf_id >= pf->vf_num) {
8812                         PMD_DRV_LOG(ERR, "Invalid argument.");
8813                         rte_free(cld_filter);
8814                         return -EINVAL;
8815                 }
8816                 vf = &pf->vfs[tunnel_filter->vf_id];
8817                 vsi = vf->vsi;
8818         }
8819
8820         /* Check if there is the filter in SW list */
8821         memset(&check_filter, 0, sizeof(check_filter));
8822         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8823         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8824         check_filter.vf_id = tunnel_filter->vf_id;
8825         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8826         if (add && node) {
8827                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8828                 rte_free(cld_filter);
8829                 return -EINVAL;
8830         }
8831
8832         if (!add && !node) {
8833                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8834                 rte_free(cld_filter);
8835                 return -EINVAL;
8836         }
8837
8838         if (add) {
8839                 if (big_buffer)
8840                         ret = i40e_aq_add_cloud_filters_bb(hw,
8841                                                    vsi->seid, cld_filter, 1);
8842                 else
8843                         ret = i40e_aq_add_cloud_filters(hw,
8844                                         vsi->seid, &cld_filter->element, 1);
8845                 if (ret < 0) {
8846                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8847                         rte_free(cld_filter);
8848                         return -ENOTSUP;
8849                 }
8850                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8851                 if (tunnel == NULL) {
8852                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8853                         rte_free(cld_filter);
8854                         return -ENOMEM;
8855                 }
8856
8857                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8858                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8859                 if (ret < 0)
8860                         rte_free(tunnel);
8861         } else {
8862                 if (big_buffer)
8863                         ret = i40e_aq_rem_cloud_filters_bb(
8864                                 hw, vsi->seid, cld_filter, 1);
8865                 else
8866                         ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8867                                                 &cld_filter->element, 1);
8868                 if (ret < 0) {
8869                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8870                         rte_free(cld_filter);
8871                         return -ENOTSUP;
8872                 }
8873                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8874         }
8875
8876         rte_free(cld_filter);
8877         return ret;
8878 }
8879
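/*
 * Return the index of 'port' in the VXLAN port table, or -1 if it is
 * not present. Port number 0 marks a free slot, so looking up port 0
 * (as i40e_add_vxlan_port() does) finds the first free entry.
 */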
8880 static int
8881 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8882 {
8883         uint8_t i;
8884
8885         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8886                 if (pf->vxlan_ports[i] == port)
8887                         return i;
8888         }
8889
8890         return -1;
8891 }
8892
8893 static int
8894 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8895 {
8896         int  idx, ret;
8897         uint8_t filter_idx = 0;
8898         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8899
8900         idx = i40e_get_vxlan_port_idx(pf, port);
8901
8902         /* Check if port already exists */
8903         if (idx >= 0) {
8904                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8905                 return -EINVAL;
8906         }
8907
8908         /* Now check if there is space to add the new port */
8909         idx = i40e_get_vxlan_port_idx(pf, 0);
8910         if (idx < 0) {
8911                 PMD_DRV_LOG(ERR,
8912                         "Maximum number of UDP ports reached, not adding port %d",
8913                         port);
8914                 return -ENOSPC;
8915         }
8916
8917         ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8918                                         &filter_idx, NULL);
8919         if (ret < 0) {
8920                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8921                 return -1;
8922         }
8923
8924         PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8925                          port,  filter_idx);
8926
8927         /* New port: add it and mark its index in the bitmap */
8928         pf->vxlan_ports[idx] = port;
8929         pf->vxlan_bitmap |= (1 << idx);
8930
8931         if (!(pf->flags & I40E_FLAG_VXLAN))
8932                 pf->flags |= I40E_FLAG_VXLAN;
8933
8934         return 0;
8935 }
8936
8937 static int
8938 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8939 {
8940         int idx;
8941         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8942
8943         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8944                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8945                 return -EINVAL;
8946         }
8947
8948         idx = i40e_get_vxlan_port_idx(pf, port);
8949
8950         if (idx < 0) {
8951                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8952                 return -EINVAL;
8953         }
8954
8955         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8956                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8957                 return -1;
8958         }
8959
8960         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8961                         port, idx);
8962
8963         pf->vxlan_ports[idx] = 0;
8964         pf->vxlan_bitmap &= ~(1 << idx);
8965
8966         if (!pf->vxlan_bitmap)
8967                 pf->flags &= ~I40E_FLAG_VXLAN;
8968
8969         return 0;
8970 }
8971
8972 /* Add UDP tunneling port */
8973 static int
8974 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8975                              struct rte_eth_udp_tunnel *udp_tunnel)
8976 {
8977         int ret = 0;
8978         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8979
8980         if (udp_tunnel == NULL)
8981                 return -EINVAL;
8982
8983         switch (udp_tunnel->prot_type) {
8984         case RTE_TUNNEL_TYPE_VXLAN:
8985                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8986                                           I40E_AQC_TUNNEL_TYPE_VXLAN);
8987                 break;
8988         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8989                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8990                                           I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8991                 break;
8992         case RTE_TUNNEL_TYPE_GENEVE:
8993         case RTE_TUNNEL_TYPE_TEREDO:
8994                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8995                 ret = -1;
8996                 break;
8997
8998         default:
8999                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
9000                 ret = -1;
9001                 break;
9002         }
9003
9004         return ret;
9005 }
9006
9007 /* Remove UDP tunneling port */
9008 static int
9009 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
9010                              struct rte_eth_udp_tunnel *udp_tunnel)
9011 {
9012         int ret = 0;
9013         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9014
9015         if (udp_tunnel == NULL)
9016                 return -EINVAL;
9017
9018         switch (udp_tunnel->prot_type) {
9019         case RTE_TUNNEL_TYPE_VXLAN:
9020         case RTE_TUNNEL_TYPE_VXLAN_GPE:
9021                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
9022                 break;
9023         case RTE_TUNNEL_TYPE_GENEVE:
9024         case RTE_TUNNEL_TYPE_TEREDO:
9025                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
9026                 ret = -1;
9027                 break;
9028         default:
9029                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
9030                 ret = -1;
9031                 break;
9032         }
9033
9034         return ret;
9035 }
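/*
 * Usage sketch (application side; port number illustrative): the two
 * handlers above are reached through the generic ethdev API, e.g.
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *           .udp_port = 4789,             /- IANA-assigned VXLAN port
 *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *   ...
 *   rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */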
9036
9037 /* Calculate the maximum number of contiguous PF queues that are configured */
9038 static int
9039 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
9040 {
9041         struct rte_eth_dev_data *data = pf->dev_data;
9042         int i, num;
9043         struct i40e_rx_queue *rxq;
9044
9045         num = 0;
9046         for (i = 0; i < pf->lan_nb_qps; i++) {
9047                 rxq = data->rx_queues[i];
9048                 if (rxq && rxq->q_set)
9049                         num++;
9050                 else
9051                         break;
9052         }
9053
9054         return num;
9055 }
9056
9057 /* Configure RSS */
9058 static int
9059 i40e_pf_config_rss(struct i40e_pf *pf)
9060 {
9061         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
9062         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9063         struct rte_eth_rss_conf rss_conf;
9064         uint32_t i, lut = 0;
9065         uint16_t j, num;
9066
9067         /*
9068          * If both VMDQ and RSS are enabled, not all PF queues are configured.
9069          * It's necessary to calculate the actual number of configured PF queues.
9070          */
9071         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
9072                 num = i40e_pf_calc_configured_queues_num(pf);
9073         else
9074                 num = pf->dev_data->nb_rx_queues;
9075
9076         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
9077         PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
9078                         num);
9079
9080         if (num == 0) {
9081                 PMD_INIT_LOG(ERR,
9082                         "No PF queues are configured to enable RSS for port %u",
9083                         pf->dev_data->port_id);
9084                 return -ENOTSUP;
9085         }
9086
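        /* Fill the RSS lookup table round-robin over the first 'num'
         * queues; four 8-bit LUT entries are packed into each 32-bit
         * HLUT register, hence the write on every fourth iteration.
         */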
9087         if (pf->adapter->rss_reta_updated == 0) {
9088                 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
9089                         if (j == num)
9090                                 j = 0;
9091                         lut = (lut << 8) | (j & ((0x1 <<
9092                                 hw->func_caps.rss_table_entry_width) - 1));
9093                         if ((i & 3) == 3)
9094                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
9095                                                rte_bswap32(lut));
9096                 }
9097         }
9098
9099         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
9100         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
9101             !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
9102                 i40e_pf_disable_rss(pf);
9103                 return 0;
9104         }
9105         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
9106                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
9107                 /* Random default key */
9108                 static uint32_t rss_key_default[] = {0x6b793944,
9109                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
9110                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
9111                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
9112
9113                 rss_conf.rss_key = (uint8_t *)rss_key_default;
9114                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
9115                                                         sizeof(uint32_t);
9116         }
9117
9118         return i40e_hw_rss_hash_set(pf, &rss_conf);
9119 }
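
/*
 * Illustrative sketch only (hypothetical helper, not driver code): how the
 * LUT loop in i40e_pf_config_rss() above packs four one-byte queue indices
 * into each 32-bit HLUT word. The real loop additionally masks each index
 * to rss_table_entry_width bits and byte-swaps the word before writing it
 * to I40E_PFQF_HLUT.
 */
static void __rte_unused
example_fill_rss_lut(uint32_t *hlut, uint16_t lut_size, uint16_t nb_queues)
{
        uint32_t lut = 0;
        uint16_t i, j;

        for (i = 0, j = 0; i < lut_size; i++, j++) {
                if (j == nb_queues)
                        j = 0;
                /* Shift earlier entries up; queue index j lands in low byte */
                lut = (lut << 8) | j;
                /* Every fourth entry completes one 32-bit LUT word */
                if ((i & 3) == 3)
                        hlut[i >> 2] = lut;
        }
}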
9120
9121 static int
9122 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
9123                                struct rte_eth_tunnel_filter_conf *filter)
9124 {
9125         if (pf == NULL || filter == NULL) {
9126                 PMD_DRV_LOG(ERR, "Invalid parameter");
9127                 return -EINVAL;
9128         }
9129
9130         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
9131                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9132                 return -EINVAL;
9133         }
9134
9135         if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
9136                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
9137                 return -EINVAL;
9138         }
9139
9140         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
9141                 (rte_is_zero_ether_addr(&filter->outer_mac))) {
9142                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
9143                 return -EINVAL;
9144         }
9145
9146         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
9147                 (rte_is_zero_ether_addr(&filter->inner_mac))) {
9148                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
9149                 return -EINVAL;
9150         }
9151
9152         return 0;
9153 }
9154
9155 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
9156 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
9157 int
9158 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
9159 {
9160         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9161         uint32_t val, reg;
9162         int ret = -EINVAL;
9163
9164         if (pf->support_multi_driver) {
9165                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
9166                 return -ENOTSUP;
9167         }
9168
9169         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
9170         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
9171
9172         if (len == 3) {
9173                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
9174         } else if (len == 4) {
9175                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
9176         } else {
9177                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
9178                 return ret;
9179         }
9180
9181         if (reg != val) {
9182                 ret = i40e_aq_debug_write_global_register(hw,
9183                                                    I40E_GL_PRS_FVBM(2),
9184                                                    reg, NULL);
9185                 if (ret != 0)
9186                         return ret;
9187                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x updated "
9188                             "with value 0x%08x",
9189                             I40E_GL_PRS_FVBM(2), reg);
9190         } else {
9191                 ret = 0;
9192         }
9193         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
9194                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
9195
9196         return ret;
9197 }
9198
9199 static int
9200 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
9201 {
9202         int ret = -EINVAL;
9203
9204         if (!hw || !cfg)
9205                 return -EINVAL;
9206
9207         switch (cfg->cfg_type) {
9208         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
9209                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
9210                 break;
9211         default:
9212                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
9213                 break;
9214         }
9215
9216         return ret;
9217 }
9218
9219 static int
9220 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
9221                                enum rte_filter_op filter_op,
9222                                void *arg)
9223 {
9224         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9225         int ret = I40E_ERR_PARAM;
9226
9227         switch (filter_op) {
9228         case RTE_ETH_FILTER_SET:
9229                 ret = i40e_dev_global_config_set(hw,
9230                         (struct rte_eth_global_cfg *)arg);
9231                 break;
9232         default:
9233                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
9234                 break;
9235         }
9236
9237         return ret;
9238 }
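
/*
 * Usage sketch (an assumption about the caller, with the port_id value as
 * an example): an application reaches the handler above through the legacy
 * filter-ctrl API with filter type RTE_ETH_FILTER_NONE, which is how
 * testpmd's "global_config ... gre-key-len" command configures a 3-byte
 * GRE key.
 */
static int __rte_unused
example_set_gre_key_len(uint16_t port_id)
{
        struct rte_eth_global_cfg conf;

        memset(&conf, 0, sizeof(conf));
        conf.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN;
        conf.cfg.gre_key_len = 3;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
                                       RTE_ETH_FILTER_SET, &conf);
}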
9239
9240 static int
9241 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
9242                           enum rte_filter_op filter_op,
9243                           void *arg)
9244 {
9245         struct rte_eth_tunnel_filter_conf *filter;
9246         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9247         int ret = I40E_SUCCESS;
9248
9249         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
9250
9251         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
9252                 return I40E_ERR_PARAM;
9253
9254         switch (filter_op) {
9255         case RTE_ETH_FILTER_NOP:
9256                 if (!(pf->flags & I40E_FLAG_VXLAN))
9257                         ret = I40E_NOT_SUPPORTED;
9258                 break;
9259         case RTE_ETH_FILTER_ADD:
9260                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
9261                 break;
9262         case RTE_ETH_FILTER_DELETE:
9263                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
9264                 break;
9265         default:
9266                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
9267                 ret = I40E_ERR_PARAM;
9268                 break;
9269         }
9270
9271         return ret;
9272 }
9273
9274 /* Get the symmetric hash enable configurations per port */
9275 static void
9276 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
9277 {
9278         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9279
9280         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
9281 }
9282
9283 /* Set the symmetric hash enable configurations per port */
9284 static void
9285 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9286 {
9287         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9288
9289         if (enable > 0) {
9290                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
9291                         PMD_DRV_LOG(INFO,
9292                                 "Symmetric hash has already been enabled");
9293                         return;
9294                 }
9295                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9296         } else {
9297                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
9298                         PMD_DRV_LOG(INFO,
9299                                 "Symmetric hash has already been disabled");
9300                         return;
9301                 }
9302                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9303         }
9304         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9305         I40E_WRITE_FLUSH(hw);
9306 }
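
/*
 * Usage sketch (an assumption about the caller): the setter above is
 * reached through the legacy filter-ctrl API with RTE_ETH_FILTER_HASH,
 * e.g. testpmd's "set_sym_hash_ena_per_port". This enables symmetric
 * hashing on one port only.
 */
static int __rte_unused
example_enable_sym_hash_per_port(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
        info.info.enable = 1;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}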
9307
9308 /*
9309  * Get global configurations of hash function type and symmetric hash enable
9310  * per flow type (pctype). Note that this configuration is global and affects
9311  * all the ports on the same NIC.
9312  */
9313 static int
9314 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
9315                                    struct rte_eth_hash_global_conf *g_cfg)
9316 {
9317         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9318         uint32_t reg;
9319         uint16_t i, j;
9320
9321         memset(g_cfg, 0, sizeof(*g_cfg));
9322         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9323         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
9324                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
9325         else
9326                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
9327         PMD_DRV_LOG(DEBUG, "Hash function is %s",
9328                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
9329
9330         /*
9331          * As i40e supports fewer than 64 flow types, only the first 64 bits
9332          * need to be checked.
9333          */
9334         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9335                 g_cfg->valid_bit_mask[i] = 0ULL;
9336                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
9337         }
9338
9339         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
9340
9341         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
9342                 if (!adapter->pctypes_tbl[i])
9343                         continue;
9344                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9345                      j < I40E_FILTER_PCTYPE_MAX; j++) {
9346                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
9347                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
9348                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
9349                                         g_cfg->sym_hash_enable_mask[0] |=
9350                                                                 (1ULL << i);
9351                                 }
9352                         }
9353                 }
9354         }
9355
9356         return 0;
9357 }
9358
9359 static int
9360 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
9361                               const struct rte_eth_hash_global_conf *g_cfg)
9362 {
9363         uint32_t i;
9364         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
9365
9366         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
9367                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
9368                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
9369                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
9370                                                 g_cfg->hash_func);
9371                 return -EINVAL;
9372         }
9373
9374         /*
9375          * As i40e supports fewer than 64 flow types, only the first 64 bits
9376          * need to be checked.
9377          */
9378         mask0 = g_cfg->valid_bit_mask[0];
9379         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9380                 if (i == 0) {
9381                         /* Check if any unsupported flow type is configured */
9382                         if ((mask0 | i40e_mask) ^ i40e_mask)
9383                                 goto mask_err;
9384                 } else {
9385                         if (g_cfg->valid_bit_mask[i])
9386                                 goto mask_err;
9387                 }
9388         }
9389
9390         return 0;
9391
9392 mask_err:
9393         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
9394
9395         return -EINVAL;
9396 }
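
/*
 * Worked example for the unsupported-flow-type check above:
 * (mask0 | i40e_mask) ^ i40e_mask is equivalent to mask0 & ~i40e_mask.
 * With i40e_mask = 0x6 and mask0 = 0xA, (0xA | 0x6) ^ 0x6 = 0xE ^ 0x6 = 0x8,
 * exposing the one requested flow-type bit the hardware does not support.
 */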
9397
9398 /*
9399  * Set global configurations of hash function type and symmetric hash enable
9400  * per flow type (pctype). Note that modifying this global configuration will
9401  * affect all the ports on the same NIC.
9402  */
9403 static int
9404 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
9405                                    struct rte_eth_hash_global_conf *g_cfg)
9406 {
9407         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9408         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9409         int ret;
9410         uint16_t i, j;
9411         uint32_t reg;
9412         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
9413
9414         if (pf->support_multi_driver) {
9415                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
9416                 return -ENOTSUP;
9417         }
9418
9419         /* Check the input parameters */
9420         ret = i40e_hash_global_config_check(adapter, g_cfg);
9421         if (ret < 0)
9422                 return ret;
9423
9424         /*
9425          * As i40e supports fewer than 64 flow types, only the first 64 bits
9426          * need to be configured.
9427          */
9428         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
9429                 if (mask0 & (1UL << i)) {
9430                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
9431                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
9432
9433                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9434                              j < I40E_FILTER_PCTYPE_MAX; j++) {
9435                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
9436                                         i40e_write_global_rx_ctl(hw,
9437                                                           I40E_GLQF_HSYM(j),
9438                                                           reg);
9439                         }
9440                 }
9441         }
9442
9443         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9444         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
9445                 /* Toeplitz */
9446                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
9447                         PMD_DRV_LOG(DEBUG,
9448                                 "Hash function already set to Toeplitz");
9449                         goto out;
9450                 }
9451                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
9452         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
9453                 /* Simple XOR */
9454                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
9455                         PMD_DRV_LOG(DEBUG,
9456                                 "Hash function already set to Simple XOR");
9457                         goto out;
9458                 }
9459                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
9460         } else
9461                 /* Use the default, and keep it as it is */
9462                 goto out;
9463
9464         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
9465
9466 out:
9467         I40E_WRITE_FLUSH(hw);
9468
9469         return 0;
9470 }
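
/*
 * Usage sketch (an assumption about the caller): the function above is
 * reached through the legacy filter-ctrl API with RTE_ETH_FILTER_HASH,
 * e.g. testpmd's "set_hash_global_config". This enables symmetric Toeplitz
 * hashing for non-fragmented IPv4/TCP flows; remember the setting is
 * global to the NIC, not per port.
 */
static int __rte_unused
example_set_hash_global_config(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
        info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
        info.info.global_conf.valid_bit_mask[0] =
                1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
        info.info.global_conf.sym_hash_enable_mask[0] =
                1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}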
9471
9472 /**
9473  * Valid input sets for hash and flow director filters per PCTYPE
9474  */
9475 static uint64_t
9476 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9477                 enum rte_filter_type filter)
9478 {
9479         uint64_t valid;
9480
9481         static const uint64_t valid_hash_inset_table[] = {
9482                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9483                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9484                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9485                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9486                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9487                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9488                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9489                         I40E_INSET_FLEX_PAYLOAD,
9490                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9491                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9492                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9493                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9494                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9495                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9496                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9497                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9498                         I40E_INSET_FLEX_PAYLOAD,
9499                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9500                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9501                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9502                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9503                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9504                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9505                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9506                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9507                         I40E_INSET_FLEX_PAYLOAD,
9508                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9509                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9510                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9511                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9512                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9513                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9514                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9515                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9516                         I40E_INSET_FLEX_PAYLOAD,
9517                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9518                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9519                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9520                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9521                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9522                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9523                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9524                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9525                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9526                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9527                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9528                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9529                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9530                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9531                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9532                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9533                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9534                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9535                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9536                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9537                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9538                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9539                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9540                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9541                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9542                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9543                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9544                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9545                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9546                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9547                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9548                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9549                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9550                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9551                         I40E_INSET_FLEX_PAYLOAD,
9552                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9553                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9554                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9555                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9556                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9557                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9558                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9559                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9560                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9561                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9562                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9563                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9564                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9565                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9566                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9567                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9568                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9569                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9570                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9571                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9572                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9573                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9574                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9575                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9576                         I40E_INSET_FLEX_PAYLOAD,
9577                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9578                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9579                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9580                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9581                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9582                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9583                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9584                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9585                         I40E_INSET_FLEX_PAYLOAD,
9586                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9587                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9588                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9589                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9590                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9591                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9592                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9593                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9594                         I40E_INSET_FLEX_PAYLOAD,
9595                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9596                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9597                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9598                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9599                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9600                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9601                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9602                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9603                         I40E_INSET_FLEX_PAYLOAD,
9604                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9605                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9606                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9607                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9608                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9609                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9610                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9611                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9612                         I40E_INSET_FLEX_PAYLOAD,
9613                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9614                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9615                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9616                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9617                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9618                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9619                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9620                         I40E_INSET_FLEX_PAYLOAD,
9621                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9622                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9623                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9624                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9625                         I40E_INSET_FLEX_PAYLOAD,
9626         };
9627
9628         /**
9629          * Flow director supports only fields defined in
9630          * union rte_eth_fdir_flow.
9631          */
9632         static const uint64_t valid_fdir_inset_table[] = {
9633                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9634                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9635                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9636                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9637                 I40E_INSET_IPV4_TTL,
9638                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9639                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9640                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9641                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9642                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9643                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9644                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9645                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9646                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9647                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9648                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9649                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9650                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9651                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9652                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9653                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9654                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9655                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9656                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9657                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9658                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9659                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9660                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9661                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9662                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9663                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9664                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9665                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9666                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9667                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9668                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9669                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9670                 I40E_INSET_SCTP_VT,
9671                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9672                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9673                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9674                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9675                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9676                 I40E_INSET_IPV4_TTL,
9677                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9678                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9679                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9680                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9681                 I40E_INSET_IPV6_HOP_LIMIT,
9682                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9683                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9684                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9685                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9686                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9687                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9688                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9689                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9690                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9691                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9692                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9693                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9694                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9695                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9696                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9697                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9698                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9699                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9700                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9701                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9702                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9703                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9704                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9705                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9706                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9707                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9708                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9709                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9710                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9711                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9712                 I40E_INSET_SCTP_VT,
9713                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9714                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9715                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9716                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9717                 I40E_INSET_IPV6_HOP_LIMIT,
9718                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9719                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9720                 I40E_INSET_LAST_ETHER_TYPE,
9721         };
9722
9723         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9724                 return 0;
9725         if (filter == RTE_ETH_FILTER_HASH)
9726                 valid = valid_hash_inset_table[pctype];
9727         else
9728                 valid = valid_fdir_inset_table[pctype];
9729
9730         return valid;
9731 }
9732
9733 /**
9734  * Validate if the input set is allowed for a specific PCTYPE
9735  */
9736 int
9737 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9738                 enum rte_filter_type filter, uint64_t inset)
9739 {
9740         uint64_t valid;
9741
9742         valid = i40e_get_valid_input_set(pctype, filter);
9743         if (inset & (~valid))
9744                 return -EINVAL;
9745
9746         return 0;
9747 }
9748
9749 /* Default combination of input set fields per pctype */
9750 uint64_t
9751 i40e_get_default_input_set(uint16_t pctype)
9752 {
9753         static const uint64_t default_inset_table[] = {
9754                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9755                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9756                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9757                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9758                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9759                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9760                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9761                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9762                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9763                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9764                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9765                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9766                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9767                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9768                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9769                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9770                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9771                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9772                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9773                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9774                         I40E_INSET_SCTP_VT,
9775                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9776                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9777                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9778                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9779                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9780                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9781                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9782                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9783                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9784                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9785                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9786                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9787                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9788                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9789                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9790                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9791                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9792                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9793                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9794                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9795                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9796                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9797                         I40E_INSET_SCTP_VT,
9798                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9799                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9800                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9801                         I40E_INSET_LAST_ETHER_TYPE,
9802         };
9803
9804         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9805                 return 0;
9806
9807         return default_inset_table[pctype];
9808 }
9809
9810 /**
9811  * Parse the input set from index to logical bit masks
9812  */
9813 static int
9814 i40e_parse_input_set(uint64_t *inset,
9815                      enum i40e_filter_pctype pctype,
9816                      enum rte_eth_input_set_field *field,
9817                      uint16_t size)
9818 {
9819         uint16_t i, j;
9820         int ret = -EINVAL;
9821
9822         static const struct {
9823                 enum rte_eth_input_set_field field;
9824                 uint64_t inset;
9825         } inset_convert_table[] = {
9826                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9827                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9828                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9829                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9830                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9831                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9832                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9833                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9834                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9835                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9836                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9837                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9838                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9839                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9840                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9841                         I40E_INSET_IPV6_NEXT_HDR},
9842                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9843                         I40E_INSET_IPV6_HOP_LIMIT},
9844                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9845                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9846                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9847                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9848                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9849                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9850                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9851                         I40E_INSET_SCTP_VT},
9852                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9853                         I40E_INSET_TUNNEL_DMAC},
9854                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9855                         I40E_INSET_VLAN_TUNNEL},
9856                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9857                         I40E_INSET_TUNNEL_ID},
9858                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9859                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9860                         I40E_INSET_FLEX_PAYLOAD_W1},
9861                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9862                         I40E_INSET_FLEX_PAYLOAD_W2},
9863                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9864                         I40E_INSET_FLEX_PAYLOAD_W3},
9865                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9866                         I40E_INSET_FLEX_PAYLOAD_W4},
9867                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9868                         I40E_INSET_FLEX_PAYLOAD_W5},
9869                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9870                         I40E_INSET_FLEX_PAYLOAD_W6},
9871                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9872                         I40E_INSET_FLEX_PAYLOAD_W7},
9873                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9874                         I40E_INSET_FLEX_PAYLOAD_W8},
9875         };
9876
9877         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9878                 return ret;
9879
9880         /* Only one item is allowed for DEFAULT or NONE */
9881         if (size == 1) {
9882                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9883                         *inset = i40e_get_default_input_set(pctype);
9884                         return 0;
9885                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9886                         *inset = I40E_INSET_NONE;
9887                         return 0;
9888                 }
9889         }
9890
9891         for (i = 0, *inset = 0; i < size; i++) {
9892                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9893                         if (field[i] == inset_convert_table[j].field) {
9894                                 *inset |= inset_convert_table[j].inset;
9895                                 break;
9896                         }
9897                 }
9898
9899                 /* Contains an unsupported input set field; return immediately */
9900                 if (j == RTE_DIM(inset_convert_table))
9901                         return ret;
9902         }
9903
9904         return 0;
9905 }
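
/*
 * Illustrative sketch only (hypothetical wrapper): feeding two input-set
 * fields through i40e_parse_input_set() for the IPv4/UDP pctype is expected
 * to leave I40E_INSET_IPV4_SRC | I40E_INSET_DST_PORT in *inset.
 */
static int __rte_unused
example_parse_input_set(uint64_t *inset)
{
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
        };

        return i40e_parse_input_set(inset, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
                                    fields, RTE_DIM(fields));
}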
9906
9907 /**
9908  * Translate the input set from bit masks to register aware bit masks
9909  * and vice versa
9910  */
9911 uint64_t
9912 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9913 {
9914         uint64_t val = 0;
9915         uint16_t i;
9916
9917         struct inset_map {
9918                 uint64_t inset;
9919                 uint64_t inset_reg;
9920         };
9921
9922         static const struct inset_map inset_map_common[] = {
9923                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9924                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9925                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9926                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9927                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9928                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9929                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9930                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9931                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9932                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9933                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9934                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9935                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9936                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9937                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9938                 {I40E_INSET_TUNNEL_DMAC,
9939                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9940                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9941                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9942                 {I40E_INSET_TUNNEL_SRC_PORT,
9943                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9944                 {I40E_INSET_TUNNEL_DST_PORT,
9945                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9946                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9947                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9948                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9949                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9950                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9951                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9952                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9953                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9954                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9955         };
9956
9957         /* Some registers map differently on the X722 */
9958         static const struct inset_map inset_map_diff_x722[] = {
9959                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9960                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9961                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9962                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9963         };
9964
9965         static const struct inset_map inset_map_diff_not_x722[] = {
9966                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9967                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9968                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9969                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9970         };
9971
9972         if (input == 0)
9973                 return val;
9974
9975         /* Translate input set to register aware inset */
9976         if (type == I40E_MAC_X722) {
9977                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9978                         if (input & inset_map_diff_x722[i].inset)
9979                                 val |= inset_map_diff_x722[i].inset_reg;
9980                 }
9981         } else {
9982                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9983                         if (input & inset_map_diff_not_x722[i].inset)
9984                                 val |= inset_map_diff_not_x722[i].inset_reg;
9985                 }
9986         }
9987
9988         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9989                 if (input & inset_map_common[i].inset)
9990                         val |= inset_map_common[i].inset_reg;
9991         }
9992
9993         return val;
9994 }
9995
9996 int
9997 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9998 {
9999         uint8_t i, idx = 0;
10000         uint64_t inset_need_mask = inset;
10001
10002         static const struct {
10003                 uint64_t inset;
10004                 uint32_t mask;
10005         } inset_mask_map[] = {
10006                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
10007                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
10008                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
10009                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
10010                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
10011                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
10012                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
10013                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
10014         };
10015
10016         if (!inset || !mask || !nb_elem)
10017                 return 0;
10018
10019         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
10020                 /* Clear the inset bit if no mask is required,
10021                  * e.g. for proto + ttl together.
10022                  */
10023                 if ((inset & inset_mask_map[i].inset) ==
10024                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
10025                         inset_need_mask &= ~inset_mask_map[i].inset;
10026                 if (!inset_need_mask)
10027                         return 0;
10028         }
10029         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
10030                 if ((inset_need_mask & inset_mask_map[i].inset) ==
10031                     inset_mask_map[i].inset) {
10032                         if (idx >= nb_elem) {
10033                                 PMD_DRV_LOG(ERR, "exceeded the maximal number of bitmasks");
10034                                 return -EINVAL;
10035                         }
10036                         mask[idx] = inset_mask_map[i].mask;
10037                         idx++;
10038                 }
10039         }
10040
10041         return idx;
10042 }
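
/*
 * Worked example for i40e_generate_inset_mask_reg() above: with
 * inset = I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, the first loop hits
 * the {PROTO | TTL, 0} entry, clears both bits and returns 0 (no mask words
 * needed). With inset = I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO nothing
 * is cleared, so the second loop emits two mask words (TOS, then PROTO) and
 * returns 2.
 */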
10043
10044 void
10045 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
10046 {
10047         uint32_t reg = i40e_read_rx_ctl(hw, addr);
10048
10049         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
10050         if (reg != val)
10051                 i40e_write_rx_ctl(hw, addr, val);
10052         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
10053                     (uint32_t)i40e_read_rx_ctl(hw, addr));
10054 }
10055
10056 void
10057 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
10058 {
10059         uint32_t reg = i40e_read_rx_ctl(hw, addr);
10060         struct rte_eth_dev *dev;
10061
10062         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
10063         if (reg != val) {
10064                 i40e_write_rx_ctl(hw, addr, val);
10065                 PMD_DRV_LOG(WARNING,
10066                             "i40e device %s changed global register [0x%08x]."
10067                             " original: 0x%08x, new: 0x%08x",
10068                             dev->device->name, addr, reg,
10069                             (uint32_t)i40e_read_rx_ctl(hw, addr));
10070         }
10071 }
10072
10073 static void
10074 i40e_filter_input_set_init(struct i40e_pf *pf)
10075 {
10076         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10077         enum i40e_filter_pctype pctype;
10078         uint64_t input_set, inset_reg;
10079         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10080         int num, i;
10081         uint16_t flow_type;
10082
10083         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
10084              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
10085                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
10086
10087                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
10088                         continue;
10089
10090                 input_set = i40e_get_default_input_set(pctype);
10091
10092                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10093                                                    I40E_INSET_MASK_NUM_REG);
10094                 if (num < 0)
10095                         return;
10096                 if (pf->support_multi_driver && num > 0) {
10097                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
10098                         return;
10099                 }
10100                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
10101                                         input_set);
10102
10103                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
10104                                       (uint32_t)(inset_reg & UINT32_MAX));
10105                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
10106                                      (uint32_t)((inset_reg >>
10107                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
10108                 if (!pf->support_multi_driver) {
10109                         i40e_check_write_global_reg(hw,
10110                                             I40E_GLQF_HASH_INSET(0, pctype),
10111                                             (uint32_t)(inset_reg & UINT32_MAX));
10112                         i40e_check_write_global_reg(hw,
10113                                              I40E_GLQF_HASH_INSET(1, pctype),
10114                                              (uint32_t)((inset_reg >>
10115                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
10116
10117                         for (i = 0; i < num; i++) {
10118                                 i40e_check_write_global_reg(hw,
10119                                                     I40E_GLQF_FD_MSK(i, pctype),
10120                                                     mask_reg[i]);
10121                                 i40e_check_write_global_reg(hw,
10122                                                   I40E_GLQF_HASH_MSK(i, pctype),
10123                                                   mask_reg[i]);
10124                         }
10125                         /* Clear unused mask registers of the pctype */
10126                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
10127                                 i40e_check_write_global_reg(hw,
10128                                                     I40E_GLQF_FD_MSK(i, pctype),
10129                                                     0);
10130                                 i40e_check_write_global_reg(hw,
10131                                                   I40E_GLQF_HASH_MSK(i, pctype),
10132                                                   0);
10133                         }
10134                 } else {
10135                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
10136                 }
10137                 I40E_WRITE_FLUSH(hw);
10138
10139                 /* store the default input set */
10140                 if (!pf->support_multi_driver)
10141                         pf->hash_input_set[pctype] = input_set;
10142                 pf->fdir.input_set[pctype] = input_set;
10143         }
10144 }
10145
10146 int
10147 i40e_hash_filter_inset_select(struct i40e_hw *hw,
10148                          struct rte_eth_input_set_conf *conf)
10149 {
10150         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
10151         enum i40e_filter_pctype pctype;
10152         uint64_t input_set, inset_reg = 0;
10153         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10154         int ret, i, num;
10155
10156         if (!conf) {
10157                 PMD_DRV_LOG(ERR, "Invalid pointer");
10158                 return -EFAULT;
10159         }
10160         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10161             conf->op != RTE_ETH_INPUT_SET_ADD) {
10162                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10163                 return -EINVAL;
10164         }
10165
10166         if (pf->support_multi_driver) {
10167                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
10168                 return -ENOTSUP;
10169         }
10170
10171         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10172         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10173                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
10174                 return -EINVAL;
10175         }
10176
10177         if (hw->mac.type == I40E_MAC_X722) {
10178                 /* get translated pctype value in fd pctype register */
10179                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
10180                         I40E_GLQF_FD_PCTYPES((int)pctype));
10181         }
10182
10183         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10184                                    conf->inset_size);
10185         if (ret) {
10186                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10187                 return -EINVAL;
10188         }
10189
10190         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
10191                 /* get inset value in register */
10192                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
10193                 inset_reg <<= I40E_32_BIT_WIDTH;
10194                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
10195                 input_set |= pf->hash_input_set[pctype];
10196         }
10197         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10198                                            I40E_INSET_MASK_NUM_REG);
10199         if (num < 0)
10200                 return -EINVAL;
10201
10202         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10203
10204         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
10205                                     (uint32_t)(inset_reg & UINT32_MAX));
10206         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
10207                                     (uint32_t)((inset_reg >>
10208                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
10209
10210         for (i = 0; i < num; i++)
10211                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10212                                             mask_reg[i]);
10213         /* Clear unused mask registers of the pctype */
10214         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10215                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10216                                             0);
10217         I40E_WRITE_FLUSH(hw);
10218
10219         pf->hash_input_set[pctype] = input_set;
10220         return 0;
10221 }
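
/*
 * Usage sketch (an assumption about the caller): the function above is
 * reached through the legacy filter-ctrl API, e.g. testpmd's
 * "set_hash_input_set". This hashes IPv4/UDP flows on the source address
 * only; it writes global registers, so it affects every port on the NIC.
 */
static int __rte_unused
example_select_hash_input_set(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
        info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
        info.info.input_set_conf.inset_size = 1;
        info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}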
10222
10223 int
10224 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
10225                          struct rte_eth_input_set_conf *conf)
10226 {
10227         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10228         enum i40e_filter_pctype pctype;
10229         uint64_t input_set, inset_reg = 0;
10230         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10231         int ret, i, num;
10232
10233         if (!hw || !conf) {
10234                 PMD_DRV_LOG(ERR, "Invalid pointer");
10235                 return -EFAULT;
10236         }
10237         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10238             conf->op != RTE_ETH_INPUT_SET_ADD) {
10239                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10240                 return -EINVAL;
10241         }
10242
10243         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10244
10245         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10246                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
10247                 return -EINVAL;
10248         }
10249
10250         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10251                                    conf->inset_size);
10252         if (ret) {
10253                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10254                 return -EINVAL;
10255         }
10256
10257         /* get inset value in register */
10258         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
10259         inset_reg <<= I40E_32_BIT_WIDTH;
10260         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
10261
10262         /* Cannot change the inset register for FDIR flex payload here;
10263          * that is done by writing I40E_PRTQF_FD_FLXINSET
10264          * in i40e_set_flex_mask_on_pctype.
10265          */
10266         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
10267                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
10268         else
10269                 input_set |= pf->fdir.input_set[pctype];
10270         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10271                                            I40E_INSET_MASK_NUM_REG);
10272         if (num < 0)
10273                 return -EINVAL;
10274         if (pf->support_multi_driver && num > 0) {
10275                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10276                 return -ENOTSUP;
10277         }
10278
10279         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10280
10281         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
10282                               (uint32_t)(inset_reg & UINT32_MAX));
10283         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
10284                              (uint32_t)((inset_reg >>
10285                              I40E_32_BIT_WIDTH) & UINT32_MAX));
10286
10287         if (!pf->support_multi_driver) {
10288                 for (i = 0; i < num; i++)
10289                         i40e_check_write_global_reg(hw,
10290                                                     I40E_GLQF_FD_MSK(i, pctype),
10291                                                     mask_reg[i]);
10292                 /* clear unused mask registers of the pctype */
10293                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10294                         i40e_check_write_global_reg(hw,
10295                                                     I40E_GLQF_FD_MSK(i, pctype),
10296                                                     0);
10297         } else {
10298                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10299         }
10300         I40E_WRITE_FLUSH(hw);
10301
10302         pf->fdir.input_set[pctype] = input_set;
10303         return 0;
10304 }
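
/*
 * Illustrative usage sketch (hypothetical caller, not driver code):
 * selecting an FDIR input set of only the IPv4 source and destination
 * addresses for the IPv4/UDP flow type. RTE_ETH_INPUT_SET_SELECT replaces
 * the current input set, while RTE_ETH_INPUT_SET_ADD extends it.
 */
static inline int
example_fdir_select_ipv4_addrs(struct i40e_pf *pf)
{
        struct rte_eth_input_set_conf conf = {
                .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
                .inset_size = 2,
                .field = {
                        RTE_ETH_INPUT_SET_L3_SRC_IP4,
                        RTE_ETH_INPUT_SET_L3_DST_IP4,
                },
                .op = RTE_ETH_INPUT_SET_SELECT,
        };

        return i40e_fdir_filter_inset_select(pf, &conf);
}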
10305
10306 static int
10307 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10308 {
10309         int ret = 0;
10310
10311         if (!hw || !info) {
10312                 PMD_DRV_LOG(ERR, "Invalid pointer");
10313                 return -EFAULT;
10314         }
10315
10316         switch (info->info_type) {
10317         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10318                 i40e_get_symmetric_hash_enable_per_port(hw,
10319                                         &(info->info.enable));
10320                 break;
10321         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10322                 ret = i40e_get_hash_filter_global_config(hw,
10323                                 &(info->info.global_conf));
10324                 break;
10325         default:
10326                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10327                                                         info->info_type);
10328                 ret = -EINVAL;
10329                 break;
10330         }
10331
10332         return ret;
10333 }
10334
10335 static int
10336 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10337 {
10338         int ret = 0;
10339
10340         if (!hw || !info) {
10341                 PMD_DRV_LOG(ERR, "Invalid pointer");
10342                 return -EFAULT;
10343         }
10344
10345         switch (info->info_type) {
10346         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10347                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
10348                 break;
10349         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10350                 ret = i40e_set_hash_filter_global_config(hw,
10351                                 &(info->info.global_conf));
10352                 break;
10353         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
10354                 ret = i40e_hash_filter_inset_select(hw,
10355                                                &(info->info.input_set_conf));
10356                 break;
10357
10358         default:
10359                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10360                                                         info->info_type);
10361                 ret = -EINVAL;
10362                 break;
10363         }
10364
10365         return ret;
10366 }
10367
10368 /* Operations for hash function */
10369 static int
10370 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
10371                       enum rte_filter_op filter_op,
10372                       void *arg)
10373 {
10374         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10375         int ret = 0;
10376
10377         switch (filter_op) {
10378         case RTE_ETH_FILTER_NOP:
10379                 break;
10380         case RTE_ETH_FILTER_GET:
10381                 ret = i40e_hash_filter_get(hw,
10382                         (struct rte_eth_hash_filter_info *)arg);
10383                 break;
10384         case RTE_ETH_FILTER_SET:
10385                 ret = i40e_hash_filter_set(hw,
10386                         (struct rte_eth_hash_filter_info *)arg);
10387                 break;
10388         default:
10389                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
10390                                                                 filter_op);
10391                 ret = -ENOTSUP;
10392                 break;
10393         }
10394
10395         return ret;
10396 }
10397
10398 /* Convert ethertype filter structure */
10399 static int
10400 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
10401                               struct i40e_ethertype_filter *filter)
10402 {
10403         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
10404                 RTE_ETHER_ADDR_LEN);
10405         filter->input.ether_type = input->ether_type;
10406         filter->flags = input->flags;
10407         filter->queue = input->queue;
10408
10409         return 0;
10410 }
10411
10412 /* Check if the ethertype filter already exists */
10413 struct i40e_ethertype_filter *
10414 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
10415                                 const struct i40e_ethertype_filter_input *input)
10416 {
10417         int ret;
10418
10419         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
10420         if (ret < 0)
10421                 return NULL;
10422
10423         return ethertype_rule->hash_map[ret];
10424 }
10425
10426 /* Add ethertype filter in SW list */
10427 static int
10428 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
10429                                 struct i40e_ethertype_filter *filter)
10430 {
10431         struct i40e_ethertype_rule *rule = &pf->ethertype;
10432         int ret;
10433
10434         ret = rte_hash_add_key(rule->hash_table, &filter->input);
10435         if (ret < 0) {
10436                 PMD_DRV_LOG(ERR,
10437                             "Failed to insert ethertype filter"
10438                             " into hash table, ret = %d!",
10439                             ret);
10440                 return ret;
10441         }
10442         rule->hash_map[ret] = filter;
10443
10444         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
10445
10446         return 0;
10447 }
10448
10449 /* Delete ethertype filter in SW list */
10450 int
10451 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
10452                              struct i40e_ethertype_filter_input *input)
10453 {
10454         struct i40e_ethertype_rule *rule = &pf->ethertype;
10455         struct i40e_ethertype_filter *filter;
10456         int ret;
10457
10458         ret = rte_hash_del_key(rule->hash_table, input);
10459         if (ret < 0) {
10460                 PMD_DRV_LOG(ERR,
10461                             "Failed to delete ethertype filter"
10462                             " from hash table, ret = %d!",
10463                             ret);
10464                 return ret;
10465         }
10466         filter = rule->hash_map[ret];
10467         rule->hash_map[ret] = NULL;
10468
10469         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
10470         rte_free(filter);
10471
10472         return 0;
10473 }
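
/*
 * Illustrative sketch (hypothetical setup code): the insert/delete helpers
 * above rely on rte_hash returning a stable slot index on add/del, which
 * doubles as the index into rule->hash_map. Assuming rte_hash.h is visible
 * here (as the lookups above imply), a matching table would be created once
 * at init time along these lines; the name and capacity are assumptions:
 */
static inline struct rte_hash *
example_create_ethertype_hash(const char *name)
{
        struct rte_hash_parameters params = {
                .name = name,
                .entries = 1024,        /* illustrative capacity */
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        return rte_hash_create(&params);
}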
10474
10475 /*
10476  * Configure an ethertype filter, which can direct packets by filtering
10477  * on MAC address and ether_type, or on ether_type alone
10478  */
10479 int
10480 i40e_ethertype_filter_set(struct i40e_pf *pf,
10481                         struct rte_eth_ethertype_filter *filter,
10482                         bool add)
10483 {
10484         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10485         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
10486         struct i40e_ethertype_filter *ethertype_filter, *node;
10487         struct i40e_ethertype_filter check_filter;
10488         struct i40e_control_filter_stats stats;
10489         uint16_t flags = 0;
10490         int ret;
10491
10492         if (filter->queue >= pf->dev_data->nb_rx_queues) {
10493                 PMD_DRV_LOG(ERR, "Invalid queue ID");
10494                 return -EINVAL;
10495         }
10496         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
10497                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
10498                 PMD_DRV_LOG(ERR,
10499                         "unsupported ether_type(0x%04x) in control packet filter.",
10500                         filter->ether_type);
10501                 return -EINVAL;
10502         }
10503         if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
10504                 PMD_DRV_LOG(WARNING,
10505                         "filter vlan ether_type in first tag is not supported.");
10506
10507         /* Check if there is the filter in SW list */
10508         memset(&check_filter, 0, sizeof(check_filter));
10509         i40e_ethertype_filter_convert(filter, &check_filter);
10510         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
10511                                                &check_filter.input);
10512         if (add && node) {
10513                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
10514                 return -EINVAL;
10515         }
10516
10517         if (!add && !node) {
10518                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
10519                 return -EINVAL;
10520         }
10521
10522         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
10523                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10524         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
10525                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10526         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10527
10528         memset(&stats, 0, sizeof(stats));
10529         ret = i40e_aq_add_rem_control_packet_filter(hw,
10530                         filter->mac_addr.addr_bytes,
10531                         filter->ether_type, flags,
10532                         pf->main_vsi->seid,
10533                         filter->queue, add, &stats, NULL);
10534
10535         PMD_DRV_LOG(INFO,
10536                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
10537                 ret, stats.mac_etype_used, stats.etype_used,
10538                 stats.mac_etype_free, stats.etype_free);
10539         if (ret < 0)
10540                 return -ENOSYS;
10541
10542         /* Add or delete a filter in SW list */
10543         if (add) {
10544                 ethertype_filter = rte_zmalloc("ethertype_filter",
10545                                        sizeof(*ethertype_filter), 0);
10546                 if (ethertype_filter == NULL) {
10547                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
10548                         return -ENOMEM;
10549                 }
10550
10551                 rte_memcpy(ethertype_filter, &check_filter,
10552                            sizeof(check_filter));
10553                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
10554                 if (ret < 0)
10555                         rte_free(ethertype_filter);
10556         } else {
10557                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
10558         }
10559
10560         return ret;
10561 }
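
/*
 * Illustrative usage sketch (hypothetical caller, not driver code):
 * dropping all LLDP frames (ether_type 0x88CC) regardless of source MAC.
 * Leaving RTE_ETHTYPE_FLAGS_MAC unset makes the MAC address ignored, per
 * the flag handling in i40e_ethertype_filter_set() above.
 */
static inline int
example_drop_lldp(struct i40e_pf *pf)
{
        struct rte_eth_ethertype_filter filter = {
                .ether_type = 0x88CC,           /* LLDP */
                .flags = RTE_ETHTYPE_FLAGS_DROP,
                .queue = 0,                     /* ignored when dropping */
        };

        return i40e_ethertype_filter_set(pf, &filter, TRUE);
}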
10562
10563 /*
10564  * Handle operations for ethertype filter.
10565  */
10566 static int
10567 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
10568                                 enum rte_filter_op filter_op,
10569                                 void *arg)
10570 {
10571         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10572         int ret = 0;
10573
10574         if (filter_op == RTE_ETH_FILTER_NOP)
10575                 return ret;
10576
10577         if (arg == NULL) {
10578                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
10579                             filter_op);
10580                 return -EINVAL;
10581         }
10582
10583         switch (filter_op) {
10584         case RTE_ETH_FILTER_ADD:
10585                 ret = i40e_ethertype_filter_set(pf,
10586                         (struct rte_eth_ethertype_filter *)arg,
10587                         TRUE);
10588                 break;
10589         case RTE_ETH_FILTER_DELETE:
10590                 ret = i40e_ethertype_filter_set(pf,
10591                         (struct rte_eth_ethertype_filter *)arg,
10592                         FALSE);
10593                 break;
10594         default:
10595                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
10596                 ret = -ENOSYS;
10597                 break;
10598         }
10599         return ret;
10600 }
10601
10602 static int
10603 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
10604                      enum rte_filter_type filter_type,
10605                      enum rte_filter_op filter_op,
10606                      void *arg)
10607 {
10608         int ret = 0;
10609
10610         if (dev == NULL)
10611                 return -EINVAL;
10612
10613         switch (filter_type) {
10614         case RTE_ETH_FILTER_NONE:
10615                 /* For global configuration */
10616                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
10617                 break;
10618         case RTE_ETH_FILTER_HASH:
10619                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
10620                 break;
10621         case RTE_ETH_FILTER_MACVLAN:
10622                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
10623                 break;
10624         case RTE_ETH_FILTER_ETHERTYPE:
10625                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
10626                 break;
10627         case RTE_ETH_FILTER_TUNNEL:
10628                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
10629                 break;
10630         case RTE_ETH_FILTER_FDIR:
10631                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
10632                 break;
10633         case RTE_ETH_FILTER_GENERIC:
10634                 if (filter_op != RTE_ETH_FILTER_GET)
10635                         return -EINVAL;
10636                 *(const void **)arg = &i40e_flow_ops;
10637                 break;
10638         default:
10639                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
10640                                                         filter_type);
10641                 ret = -EINVAL;
10642                 break;
10643         }
10644
10645         return ret;
10646 }
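
/*
 * Illustrative sketch (hypothetical caller, not driver code): under the
 * legacy filter API, rte_flow support is discovered by requesting the
 * RTE_ETH_FILTER_GENERIC ops pointer, which the switch above returns.
 */
static inline const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
        const struct rte_flow_ops *ops = NULL;

        if (i40e_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                 RTE_ETH_FILTER_GET, &ops) < 0)
                return NULL;

        return ops;
}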
10647
10648 /*
10649  * Check and enable Extended Tag.
10650  * Enabling Extended Tag is important for 40G performance.
10651  */
10652 static void
10653 i40e_enable_extended_tag(struct rte_eth_dev *dev)
10654 {
10655         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10656         uint32_t buf = 0;
10657         int ret;
10658
10659         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10660                                       PCI_DEV_CAP_REG);
10661         if (ret < 0) {
10662                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10663                             PCI_DEV_CAP_REG);
10664                 return;
10665         }
10666         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
10667                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
10668                 return;
10669         }
10670
10671         buf = 0;
10672         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10673                                       PCI_DEV_CTRL_REG);
10674         if (ret < 0) {
10675                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10676                             PCI_DEV_CTRL_REG);
10677                 return;
10678         }
10679         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
10680                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
10681                 return;
10682         }
10683         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
10684         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
10685                                        PCI_DEV_CTRL_REG);
10686         if (ret < 0) {
10687                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
10688                             PCI_DEV_CTRL_REG);
10689                 return;
10690         }
10691 }
10692
10693 /*
10694  * As some registers are not reset except by a global hardware reset,
10695  * hardware initialization is needed to put those registers into an
10696  * expected initial state.
10697  */
10698 static void
10699 i40e_hw_init(struct rte_eth_dev *dev)
10700 {
10701         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10702
10703         i40e_enable_extended_tag(dev);
10704
10705         /* clear the PF Queue Filter control register */
10706         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
10707
10708         /* Disable symmetric hash per port */
10709         i40e_set_symmetric_hash_enable_per_port(hw, 0);
10710 }
10711
10712 /*
10713  * For X722 it is possible to have multiple pctypes mapped to the same
10714  * flowtype; however, this function will return only the single highest
10715  * pctype index, which is not quite correct. This is a known problem of
10716  * the i40e driver and needs to be fixed later.
10717  */
10718 enum i40e_filter_pctype
10719 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
10720 {
10721         int i;
10722         uint64_t pctype_mask;
10723
10724         if (flow_type < I40E_FLOW_TYPE_MAX) {
10725                 pctype_mask = adapter->pctypes_tbl[flow_type];
10726                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10727                         if (pctype_mask & (1ULL << i))
10728                                 return (enum i40e_filter_pctype)i;
10729                 }
10730         }
10731         return I40E_FILTER_PCTYPE_INVALID;
10732 }
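
/*
 * Illustrative sketch (hypothetical helper): because of the multi-pctype
 * caveat above, the two mapping functions are inverses only at the flow
 * type level; a round trip recovers the flow type, not necessarily the
 * original pctype.
 */
static inline bool
example_flowtype_roundtrip(const struct i40e_adapter *adapter,
                           uint16_t flow_type)
{
        enum i40e_filter_pctype pctype =
                i40e_flowtype_to_pctype(adapter, flow_type);

        if (pctype == I40E_FILTER_PCTYPE_INVALID)
                return false;

        return i40e_pctype_to_flowtype(adapter, pctype) == flow_type;
}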
10733
10734 uint16_t
10735 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10736                         enum i40e_filter_pctype pctype)
10737 {
10738         uint16_t flowtype;
10739         uint64_t pctype_mask = 1ULL << pctype;
10740
10741         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10742              flowtype++) {
10743                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10744                         return flowtype;
10745         }
10746
10747         return RTE_ETH_FLOW_UNKNOWN;
10748 }
10749
10750 /*
10751  * On X710, performance numbers are far from expectation on recent firmware
10752  * versions. On XL710, performance numbers are likewise far from expectation
10753  * on recent firmware versions if promiscuous mode is disabled, or if
10754  * promiscuous mode is enabled and the port MAC address equals the packet
10755  * destination MAC address. The fix for this issue may not be integrated into
10756  * the following firmware version, so a workaround in the software driver is
10757  * needed. It modifies the initial values of 3 internal-only registers for
10758  * both X710 and XL710. Note that the values for X710 and XL710 could differ,
10759  * and the workaround can be removed once it is fixed in firmware.
10760  */
10761
10762 /* For both X710 and XL710 */
10763 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10764 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10765 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10766
10767 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10768 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10769
10770 /* For X722 */
10771 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10772 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10773
10774 /* For X710 */
10775 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10776 /* For XL710 */
10777 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10778 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10779
10780 /*
10781  * GL_SWR_PM_UP_THR:
10782  * The value is not affected by the link speed; it is set according
10783  * to the total number of ports for a better pipe-monitor configuration.
10784  */
10785 static bool
10786 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10787 {
10788 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10789                 .device_id = (dev),   \
10790                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10791
10792 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10793                 .device_id = (dev),   \
10794                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10795
10796         static const struct {
10797                 uint16_t device_id;
10798                 uint32_t val;
10799         } swr_pm_table[] = {
10800                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10801                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10802                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10803                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10804                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10805
10806                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10807                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10808                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10809                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10810                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10811                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10812                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10813         };
10814         uint32_t i;
10815
10816         if (value == NULL) {
10817                 PMD_DRV_LOG(ERR, "value is NULL");
10818                 return false;
10819         }
10820
10821         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10822                 if (hw->device_id == swr_pm_table[i].device_id) {
10823                         *value = swr_pm_table[i].val;
10824
10825                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10826                                     "value - 0x%08x",
10827                                     hw->device_id, *value);
10828                         return true;
10829                 }
10830         }
10831
10832         return false;
10833 }
10834
10835 static int
10836 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10837 {
10838         enum i40e_status_code status;
10839         struct i40e_aq_get_phy_abilities_resp phy_ab;
10840         int ret = -ENOTSUP;
10841         int retries = 0;
10842
10843         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10844                                               NULL);
10845
10846         while (status) {
10847                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10848                         status);
10849                 retries++;
10850                 rte_delay_us(100000);
10851                 if (retries < 5)
10852                         status = i40e_aq_get_phy_capabilities(hw, false,
10853                                         true, &phy_ab, NULL);
10854                 else
10855                         return ret;
10856         }
10857         return 0;
10858 }
10859
10860 static void
10861 i40e_configure_registers(struct i40e_hw *hw)
10862 {
10863         static struct {
10864                 uint32_t addr;
10865                 uint64_t val;
10866         } reg_table[] = {
10867                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10868                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10869                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10870         };
10871         uint64_t reg;
10872         uint32_t i;
10873         int ret;
10874
10875         for (i = 0; i < RTE_DIM(reg_table); i++) {
10876                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10877                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10878                                 reg_table[i].val =
10879                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10880                         else /* For X710/XL710/XXV710 */
10881                                 if (hw->aq.fw_maj_ver < 6)
10882                                         reg_table[i].val =
10883                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10884                                 else
10885                                         reg_table[i].val =
10886                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10887                 }
10888
10889                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10890                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10891                                 reg_table[i].val =
10892                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10893                         else /* For X710/XL710/XXV710 */
10894                                 reg_table[i].val =
10895                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10896                 }
10897
10898                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10899                         uint32_t cfg_val;
10900
10901                         if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10902                                 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10903                                             "GL_SWR_PM_UP_THR value fixup",
10904                                             hw->device_id);
10905                                 continue;
10906                         }
10907
10908                         reg_table[i].val = cfg_val;
10909                 }
10910
10911                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10912                                                         &reg, NULL);
10913                 if (ret < 0) {
10914                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10915                                                         reg_table[i].addr);
10916                         break;
10917                 }
10918                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10919                                                 reg_table[i].addr, reg);
10920                 if (reg == reg_table[i].val)
10921                         continue;
10922
10923                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10924                                                 reg_table[i].val, NULL);
10925                 if (ret < 0) {
10926                         PMD_DRV_LOG(ERR,
10927                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10928                                 reg_table[i].val, reg_table[i].addr);
10929                         break;
10930                 }
10931                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10932                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10933         }
10934 }
10935
10936 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10937 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10938 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10939 static int
10940 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10941 {
10942         uint32_t reg;
10943         int ret;
10944
10945         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10946                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10947                 return -EINVAL;
10948         }
10949
10950         /* Configure for double VLAN RX stripping */
10951         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10952         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10953                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10954                 ret = i40e_aq_debug_write_register(hw,
10955                                                    I40E_VSI_TSR(vsi->vsi_id),
10956                                                    reg, NULL);
10957                 if (ret < 0) {
10958                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10959                                     vsi->vsi_id);
10960                         return I40E_ERR_CONFIG;
10961                 }
10962         }
10963
10964         /* Configure for double VLAN TX insertion */
10965         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10966         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10967                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10968                 ret = i40e_aq_debug_write_register(hw,
10969                                                    I40E_VSI_L2TAGSTXVALID(
10970                                                    vsi->vsi_id), reg, NULL);
10971                 if (ret < 0) {
10972                         PMD_DRV_LOG(ERR,
10973                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10974                                 vsi->vsi_id);
10975                         return I40E_ERR_CONFIG;
10976                 }
10977         }
10978
10979         return 0;
10980 }
10981
10982 /**
10983  * i40e_aq_add_mirror_rule
10984  * @hw: pointer to the hardware structure
10985  * @seid: VEB seid to add mirror rule to
10986  * @dst_id: destination vsi seid
10987  * @entries: Buffer which contains the entities to be mirrored
10988  * @count: number of entities contained in the buffer
10989  * @rule_id: the rule_id of the rule to be added
10990  *
10991  * Add a mirror rule for a given veb.
10992  *
10993  **/
10994 static enum i40e_status_code
10995 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10996                         uint16_t seid, uint16_t dst_id,
10997                         uint16_t rule_type, uint16_t *entries,
10998                         uint16_t count, uint16_t *rule_id)
10999 {
11000         struct i40e_aq_desc desc;
11001         struct i40e_aqc_add_delete_mirror_rule cmd;
11002         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
11003                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
11004                 &desc.params.raw;
11005         uint16_t buff_len;
11006         enum i40e_status_code status;
11007
11008         i40e_fill_default_direct_cmd_desc(&desc,
11009                                           i40e_aqc_opc_add_mirror_rule);
11010         memset(&cmd, 0, sizeof(cmd));
11011
11012         buff_len = sizeof(uint16_t) * count;
11013         desc.datalen = rte_cpu_to_le_16(buff_len);
11014         if (buff_len > 0)
11015                 desc.flags |= rte_cpu_to_le_16(
11016                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
11017         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
11018                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
11019         cmd.num_entries = rte_cpu_to_le_16(count);
11020         cmd.seid = rte_cpu_to_le_16(seid);
11021         cmd.destination = rte_cpu_to_le_16(dst_id);
11022
11023         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
11024         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
11025         PMD_DRV_LOG(INFO,
11026                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
11027                 hw->aq.asq_last_status, resp->rule_id,
11028                 resp->mirror_rules_used, resp->mirror_rules_free);
11029         *rule_id = rte_le_to_cpu_16(resp->rule_id);
11030
11031         return status;
11032 }
11033
11034 /**
11035  * i40e_aq_del_mirror_rule
11036  * @hw: pointer to the hardware structure
11037  * @seid: VEB seid to delete the mirror rule from
11038  * @entries: Buffer which contains the entities to be mirrored
11039  * @count: number of entities contained in the buffer
11040  * @rule_id: the rule_id of the rule to be deleted
11041  *
11042  * Delete a mirror rule for a given veb.
11043  *
11044  **/
11045 static enum i40e_status_code
11046 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
11047                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
11048                 uint16_t count, uint16_t rule_id)
11049 {
11050         struct i40e_aq_desc desc;
11051         struct i40e_aqc_add_delete_mirror_rule cmd;
11052         uint16_t buff_len = 0;
11053         enum i40e_status_code status;
11054         void *buff = NULL;
11055
11056         i40e_fill_default_direct_cmd_desc(&desc,
11057                                           i40e_aqc_opc_delete_mirror_rule);
11058         memset(&cmd, 0, sizeof(cmd));
11059         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
11060                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
11061                                                           I40E_AQ_FLAG_RD));
11062                 cmd.num_entries = count;
11063                 buff_len = sizeof(uint16_t) * count;
11064                 desc.datalen = rte_cpu_to_le_16(buff_len);
11065                 buff = (void *)entries;
11066         } else
11067                 /* the rule id is carried in the destination field when deleting a mirror rule */
11068                 cmd.destination = rte_cpu_to_le_16(rule_id);
11069
11070         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
11071                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
11072         cmd.seid = rte_cpu_to_le_16(seid);
11073
11074         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
11075         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
11076
11077         return status;
11078 }
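
/*
 * Illustrative sketch (hypothetical helper): the add/del mirror routines
 * above follow the usual admin-queue convention in this file: a command
 * that carries a data buffer must set I40E_AQ_FLAG_BUF (plus
 * I40E_AQ_FLAG_RD when firmware reads the buffer) and a matching datalen;
 * a direct command leaves both untouched.
 */
static inline void
example_mark_indirect_cmd(struct i40e_aq_desc *desc, uint16_t buff_len)
{
        if (buff_len == 0)
                return; /* direct command */

        desc->datalen = rte_cpu_to_le_16(buff_len);
        desc->flags |= rte_cpu_to_le_16(
                (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
}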
11079
11080 /**
11081  * i40e_mirror_rule_set
11082  * @dev: pointer to the device structure
11083  * @mirror_conf: mirror rule info
11084  * @sw_id: mirror rule's sw_id
11085  * @on: enable/disable
11086  *
11087  * set a mirror rule.
11088  *
11089  **/
11090 static int
11091 i40e_mirror_rule_set(struct rte_eth_dev *dev,
11092                         struct rte_eth_mirror_conf *mirror_conf,
11093                         uint8_t sw_id, uint8_t on)
11094 {
11095         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11096         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11097         struct i40e_mirror_rule *it, *mirr_rule = NULL;
11098         struct i40e_mirror_rule *parent = NULL;
11099         uint16_t seid, dst_seid, rule_id;
11100         uint16_t i, j = 0;
11101         int ret;
11102
11103         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
11104
11105         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
11106                 PMD_DRV_LOG(ERR,
11107                         "mirror rule cannot be configured without a VEB or VFs.");
11108                 return -ENOSYS;
11109         }
11110         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
11111                 PMD_DRV_LOG(ERR, "mirror table is full.");
11112                 return -ENOSPC;
11113         }
11114         if (mirror_conf->dst_pool > pf->vf_num) {
11115                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
11116                                  mirror_conf->dst_pool);
11117                 return -EINVAL;
11118         }
11119
11120         seid = pf->main_vsi->veb->seid;
11121
11122         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11123                 if (sw_id <= it->index) {
11124                         mirr_rule = it;
11125                         break;
11126                 }
11127                 parent = it;
11128         }
11129         if (mirr_rule && sw_id == mirr_rule->index) {
11130                 if (on) {
11131                         PMD_DRV_LOG(ERR, "mirror rule exists.");
11132                         return -EEXIST;
11133                 } else {
11134                         ret = i40e_aq_del_mirror_rule(hw, seid,
11135                                         mirr_rule->rule_type,
11136                                         mirr_rule->entries,
11137                                         mirr_rule->num_entries, mirr_rule->id);
11138                         if (ret < 0) {
11139                                 PMD_DRV_LOG(ERR,
11140                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
11141                                         ret, hw->aq.asq_last_status);
11142                                 return -ENOSYS;
11143                         }
11144                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11145                         rte_free(mirr_rule);
11146                         pf->nb_mirror_rule--;
11147                         return 0;
11148                 }
11149         } else if (!on) {
11150                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11151                 return -ENOENT;
11152         }
11153
11154         mirr_rule = rte_zmalloc("i40e_mirror_rule",
11155                                 sizeof(struct i40e_mirror_rule), 0);
11156         if (!mirr_rule) {
11157                 PMD_DRV_LOG(ERR, "failed to allocate memory");
11158                 return I40E_ERR_NO_MEMORY;
11159         }
11160         switch (mirror_conf->rule_type) {
11161         case ETH_MIRROR_VLAN:
11162                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
11163                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
11164                                 mirr_rule->entries[j] =
11165                                         mirror_conf->vlan.vlan_id[i];
11166                                 j++;
11167                         }
11168                 }
11169                 if (j == 0) {
11170                         PMD_DRV_LOG(ERR, "vlan is not specified.");
11171                         rte_free(mirr_rule);
11172                         return -EINVAL;
11173                 }
11174                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
11175                 break;
11176         case ETH_MIRROR_VIRTUAL_POOL_UP:
11177         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
11178                 /* check if the specified pool bit is out of range */
11179                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
11180                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
11181                         rte_free(mirr_rule);
11182                         return -EINVAL;
11183                 }
11184                 for (i = 0, j = 0; i < pf->vf_num; i++) {
11185                         if (mirror_conf->pool_mask & (1ULL << i)) {
11186                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
11187                                 j++;
11188                         }
11189                 }
11190                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
11191                         /* add pf vsi to entries */
11192                         mirr_rule->entries[j] = pf->main_vsi_seid;
11193                         j++;
11194                 }
11195                 if (j == 0) {
11196                         PMD_DRV_LOG(ERR, "pool is not specified.");
11197                         rte_free(mirr_rule);
11198                         return -EINVAL;
11199                 }
11200                 /* egress and ingress in AQ commands mean from the switch, not the port */
11201                 mirr_rule->rule_type =
11202                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
11203                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
11204                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
11205                 break;
11206         case ETH_MIRROR_UPLINK_PORT:
11207                 /* egress and ingress in AQ commands mean from the switch, not the port */
11208                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
11209                 break;
11210         case ETH_MIRROR_DOWNLINK_PORT:
11211                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
11212                 break;
11213         default:
11214                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
11215                         mirror_conf->rule_type);
11216                 rte_free(mirr_rule);
11217                 return -EINVAL;
11218         }
11219
11220         /* If the dst_pool is equal to vf_num, consider it as PF */
11221         if (mirror_conf->dst_pool == pf->vf_num)
11222                 dst_seid = pf->main_vsi_seid;
11223         else
11224                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
11225
11226         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
11227                                       mirr_rule->rule_type, mirr_rule->entries,
11228                                       j, &rule_id);
11229         if (ret < 0) {
11230                 PMD_DRV_LOG(ERR,
11231                         "failed to add mirror rule: ret = %d, aq_err = %d.",
11232                         ret, hw->aq.asq_last_status);
11233                 rte_free(mirr_rule);
11234                 return -ENOSYS;
11235         }
11236
11237         mirr_rule->index = sw_id;
11238         mirr_rule->num_entries = j;
11239         mirr_rule->id = rule_id;
11240         mirr_rule->dst_vsi_seid = dst_seid;
11241
11242         if (parent)
11243                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
11244         else
11245                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
11246
11247         pf->nb_mirror_rule++;
11248         return 0;
11249 }
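
/*
 * Illustrative usage sketch (hypothetical caller, not driver code):
 * mirroring two VLANs to the PF pool. Setting dst_pool equal to
 * pf->vf_num selects the PF VSI, per the check near the end of
 * i40e_mirror_rule_set().
 */
static inline int
example_mirror_two_vlans(struct rte_eth_dev *dev, struct i40e_pf *pf)
{
        struct rte_eth_mirror_conf conf = {
                .rule_type = ETH_MIRROR_VLAN,
                .dst_pool = pf->vf_num, /* PF as destination */
        };

        conf.vlan.vlan_mask = 0x3;      /* entries 0 and 1 are valid */
        conf.vlan.vlan_id[0] = 100;
        conf.vlan.vlan_id[1] = 200;

        return i40e_mirror_rule_set(dev, &conf, 0 /* sw_id */, 1 /* on */);
}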
11250
11251 /**
11252  * i40e_mirror_rule_reset
11253  * @dev: pointer to the device
11254  * @sw_id: mirror rule's sw_id
11255  *
11256  * reset a mirror rule.
11257  *
11258  **/
11259 static int
11260 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
11261 {
11262         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11263         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11264         struct i40e_mirror_rule *it, *mirr_rule = NULL;
11265         uint16_t seid;
11266         int ret;
11267
11268         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
11269
11270         seid = pf->main_vsi->veb->seid;
11271
11272         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11273                 if (sw_id == it->index) {
11274                         mirr_rule = it;
11275                         break;
11276                 }
11277         }
11278         if (mirr_rule) {
11279                 ret = i40e_aq_del_mirror_rule(hw, seid,
11280                                 mirr_rule->rule_type,
11281                                 mirr_rule->entries,
11282                                 mirr_rule->num_entries, mirr_rule->id);
11283                 if (ret < 0) {
11284                         PMD_DRV_LOG(ERR,
11285                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
11286                                 ret, hw->aq.asq_last_status);
11287                         return -ENOSYS;
11288                 }
11289                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11290                 rte_free(mirr_rule);
11291                 pf->nb_mirror_rule--;
11292         } else {
11293                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11294                 return -ENOENT;
11295         }
11296         return 0;
11297 }
11298
11299 static uint64_t
11300 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
11301 {
11302         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11303         uint64_t systim_cycles;
11304
11305         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
11306         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
11307                         << 32;
11308
11309         return systim_cycles;
11310 }
11311
11312 static uint64_t
11313 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
11314 {
11315         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11316         uint64_t rx_tstamp;
11317
11318         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
11319         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
11320                         << 32;
11321
11322         return rx_tstamp;
11323 }
11324
11325 static uint64_t
11326 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
11327 {
11328         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11329         uint64_t tx_tstamp;
11330
11331         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
11332         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
11333                         << 32;
11334
11335         return tx_tstamp;
11336 }
11337
11338 static void
11339 i40e_start_timecounters(struct rte_eth_dev *dev)
11340 {
11341         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11342         struct i40e_adapter *adapter = dev->data->dev_private;
11343         struct rte_eth_link link;
11344         uint32_t tsync_inc_l;
11345         uint32_t tsync_inc_h;
11346
11347         /* Get current link speed. */
11348         i40e_dev_link_update(dev, 1);
11349         rte_eth_linkstatus_get(dev, &link);
11350
11351         switch (link.link_speed) {
11352         case ETH_SPEED_NUM_40G:
11353         case ETH_SPEED_NUM_25G:
11354                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
11355                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
11356                 break;
11357         case ETH_SPEED_NUM_10G:
11358                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
11359                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
11360                 break;
11361         case ETH_SPEED_NUM_1G:
11362                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
11363                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
11364                 break;
11365         default:
11366                 tsync_inc_l = 0x0;
11367                 tsync_inc_h = 0x0;
11368         }
11369
11370         /* Set the timesync increment value. */
11371         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
11372         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
11373
11374         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
11375         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11376         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11377
11378         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11379         adapter->systime_tc.cc_shift = 0;
11380         adapter->systime_tc.nsec_mask = 0;
11381
11382         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11383         adapter->rx_tstamp_tc.cc_shift = 0;
11384         adapter->rx_tstamp_tc.nsec_mask = 0;
11385
11386         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11387         adapter->tx_tstamp_tc.cc_shift = 0;
11388         adapter->tx_tstamp_tc.nsec_mask = 0;
11389 }
11390
11391 static int
11392 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
11393 {
11394         struct i40e_adapter *adapter = dev->data->dev_private;
11395
11396         adapter->systime_tc.nsec += delta;
11397         adapter->rx_tstamp_tc.nsec += delta;
11398         adapter->tx_tstamp_tc.nsec += delta;
11399
11400         return 0;
11401 }
11402
11403 static int
11404 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
11405 {
11406         uint64_t ns;
11407         struct i40e_adapter *adapter = dev->data->dev_private;
11408
11409         ns = rte_timespec_to_ns(ts);
11410
11411         /* Set the timecounters to a new value. */
11412         adapter->systime_tc.nsec = ns;
11413         adapter->rx_tstamp_tc.nsec = ns;
11414         adapter->tx_tstamp_tc.nsec = ns;
11415
11416         return 0;
11417 }
11418
11419 static int
11420 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
11421 {
11422         uint64_t ns, systime_cycles;
11423         struct i40e_adapter *adapter = dev->data->dev_private;
11424
11425         systime_cycles = i40e_read_systime_cyclecounter(dev);
11426         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
11427         *ts = rte_ns_to_timespec(ns);
11428
11429         return 0;
11430 }
11431
11432 static int
11433 i40e_timesync_enable(struct rte_eth_dev *dev)
11434 {
11435         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11436         uint32_t tsync_ctl_l;
11437         uint32_t tsync_ctl_h;
11438
11439         /* Stop the timesync system time. */
11440         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11441         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11442         /* Reset the timesync system time value. */
11443         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
11444         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
11445
11446         i40e_start_timecounters(dev);
11447
11448         /* Clear timesync registers. */
11449         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11450         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
11451         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
11452         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
11453         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
11454         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
11455
11456         /* Enable timestamping of PTP packets. */
11457         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11458         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
11459
11460         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11461         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
11462         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
11463
11464         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11465         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11466
11467         return 0;
11468 }
11469
11470 static int
11471 i40e_timesync_disable(struct rte_eth_dev *dev)
11472 {
11473         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11474         uint32_t tsync_ctl_l;
11475         uint32_t tsync_ctl_h;
11476
11477         /* Disable timestamping of transmitted PTP packets. */
11478         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11479         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
11480
11481         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11482         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
11483
11484         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11485         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11486
11487         /* Reset the timesync increment value. */
11488         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11489         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11490
11491         return 0;
11492 }
11493
11494 static int
11495 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
11496                                 struct timespec *timestamp, uint32_t flags)
11497 {
11498         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11499         struct i40e_adapter *adapter = dev->data->dev_private;
11500         uint32_t sync_status;
11501         uint32_t index = flags & 0x03;
11502         uint64_t rx_tstamp_cycles;
11503         uint64_t ns;
11504
11505         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
11506         if ((sync_status & (1 << index)) == 0)
11507                 return -EINVAL;
11508
11509         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
11510         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
11511         *timestamp = rte_ns_to_timespec(ns);
11512
11513         return 0;
11514 }
11515
11516 static int
11517 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
11518                                 struct timespec *timestamp)
11519 {
11520         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11521         struct i40e_adapter *adapter = dev->data->dev_private;
11522         uint32_t sync_status;
11523         uint64_t tx_tstamp_cycles;
11524         uint64_t ns;
11525
11526         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11527         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
11528                 return -EINVAL;
11529
11530         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
11531         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
11532         *timestamp = rte_ns_to_timespec(ns);
11533
11534         return 0;
11535 }
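
/*
 * Illustrative usage sketch (hypothetical application code): the PMD hooks
 * above implement the generic rte_eth_timesync_* API, which an application
 * drives roughly as follows; index 0..3 selects one of the four RX
 * timestamp latches checked against PRTTSYN_STAT_1 above.
 */
static inline int
example_read_ptp_rx_stamp(uint16_t port_id, struct timespec *ts)
{
        int ret;

        ret = rte_eth_timesync_enable(port_id);
        if (ret < 0)
                return ret;

        return rte_eth_timesync_read_rx_timestamp(port_id, ts, 0);
}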
11536
11537 /*
11538  * i40e_parse_dcb_configure - parse dcb configure from user
11539  * @dev: the device being configured
11540  * @dcb_cfg: pointer of the result of parse
11541  * @*tc_map: bit map of enabled traffic classes
11542  *
11543  * Returns 0 on success, negative value on failure
11544  */
11545 static int
11546 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
11547                          struct i40e_dcbx_config *dcb_cfg,
11548                          uint8_t *tc_map)
11549 {
11550         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
11551         uint8_t i, tc_bw, bw_lf;
11552
11553         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
11554
11555         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
11556         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
11557                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
11558                 return -EINVAL;
11559         }
11560
11561         /* assume each tc has the same bw */
11562         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
11563         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11564                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
11565         /* to ensure the sum of tcbw is equal to 100 */
11566         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
11567         for (i = 0; i < bw_lf; i++)
11568                 dcb_cfg->etscfg.tcbwtable[i]++;
11569
11570         /* assume each tc has the same Transmission Selection Algorithm */
11571         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11572                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
11573
11574         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11575                 dcb_cfg->etscfg.prioritytable[i] =
11576                                 dcb_rx_conf->dcb_tc[i];
11577
11578         /* FW needs one App to configure HW */
11579         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
11580         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
11581         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
11582         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
11583
11584         if (dcb_rx_conf->nb_tcs == 0)
11585                 *tc_map = 1; /* tc0 only */
11586         else
11587                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
11588
11589         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
11590                 dcb_cfg->pfc.willing = 0;
11591                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
11592                 dcb_cfg->pfc.pfcenable = *tc_map;
11593         }
11594         return 0;
11595 }
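/*
 * Illustrative example (not part of the driver): how the bandwidth split
 * above works for nb_tcs = 8. tc_bw = 100 / 8 = 12, so tcbwtable starts as
 * {12, 12, ...}; bw_lf = 100 % 8 = 4, so one extra percent goes to each of
 * the first four TCs, giving {13, 13, 13, 13, 12, 12, 12, 12}, which sums
 * to 100. tc_map becomes RTE_LEN2MASK(8, uint8_t) = 0xFF, i.e. all eight
 * TCs enabled.
 */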
11596
11597
11598 static enum i40e_status_code
11599 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
11600                               struct i40e_aqc_vsi_properties_data *info,
11601                               uint8_t enabled_tcmap)
11602 {
11603         enum i40e_status_code ret;
11604         int i, total_tc = 0;
11605         uint16_t qpnum_per_tc, bsf, qp_idx;
11606         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
11607         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
11608         uint16_t used_queues;
11609
11610         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
11611         if (ret != I40E_SUCCESS)
11612                 return ret;
11613
11614         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11615                 if (enabled_tcmap & (1 << i))
11616                         total_tc++;
11617         }
11618         if (total_tc == 0)
11619                 total_tc = 1;
11620         vsi->enabled_tc = enabled_tcmap;
11621
11622         /* Different VSI types have different numbers of queues assigned */
11623         if (vsi->type == I40E_VSI_MAIN)
11624                 used_queues = dev_data->nb_rx_queues -
11625                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11626         else if (vsi->type == I40E_VSI_VMDQ2)
11627                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11628         else {
11629                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
11630                 return I40E_ERR_NO_AVAILABLE_VSI;
11631         }
11632
11633         /* Number of queues per enabled TC */
11634         qpnum_per_tc = used_queues / total_tc;
11635         if (qpnum_per_tc == 0) {
11636                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
11637                 return I40E_ERR_INVALID_QP_ID;
11638         }
11639         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
11640                                 I40E_MAX_Q_PER_TC);
11641         bsf = rte_bsf32(qpnum_per_tc);
11642
11643         /**
11644          * Configure TC and queue mapping parameters. For each enabled TC,
11645          * allocate qpnum_per_tc queues to it; traffic of a disabled TC is
11646          * served by the default queue.
11647          */
11648         qp_idx = 0;
11649         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11650                 if (vsi->enabled_tc & (1 << i)) {
11651                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
11652                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
11653                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
11654                         qp_idx += qpnum_per_tc;
11655                 } else
11656                         info->tc_mapping[i] = 0;
11657         }
11658
11659         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
11660         if (vsi->type == I40E_VSI_SRIOV) {
11661                 info->mapping_flags |=
11662                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
11663                 for (i = 0; i < vsi->nb_qps; i++)
11664                         info->queue_mapping[i] =
11665                                 rte_cpu_to_le_16(vsi->base_queue + i);
11666         } else {
11667                 info->mapping_flags |=
11668                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
11669                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
11670         }
11671         info->valid_sections |=
11672                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
11673
11674         return I40E_SUCCESS;
11675 }
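/*
 * Worked example (illustrative only): a MAIN VSI with 16 RX queues, no
 * VMDQ VSIs and enabled_tcmap = 0x3 gives used_queues = 16, total_tc = 2,
 * qpnum_per_tc = 8 and bsf = rte_bsf32(8) = 3. TC0 then maps queues 0-7
 * and TC1 maps queues 8-15:
 *   tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *   tc_mapping[1] = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 */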
11676
11677 /*
11678  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
11679  * @veb: VEB to be configured
11680  * @tc_map: enabled TC bitmap
11681  *
11682  * Returns 0 on success, negative value on failure
11683  */
11684 static enum i40e_status_code
11685 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
11686 {
11687         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
11688         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
11689         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
11690         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
11691         enum i40e_status_code ret = I40E_SUCCESS;
11692         int i;
11693         uint32_t bw_max;
11694
11695         /* Nothing to do if the requested TC map matches the enabled TCs */
11696         if (veb->enabled_tc == tc_map)
11697                 return ret;
11698
11699         /* configure tc bandwidth */
11700         memset(&veb_bw, 0, sizeof(veb_bw));
11701         veb_bw.tc_valid_bits = tc_map;
11702         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11703         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11704                 if (tc_map & BIT_ULL(i))
11705                         veb_bw.tc_bw_share_credits[i] = 1;
11706         }
11707         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
11708                                                    &veb_bw, NULL);
11709         if (ret) {
11710                 PMD_INIT_LOG(ERR,
11711                         "AQ command Config switch_comp BW allocation per TC failed = %d",
11712                         hw->aq.asq_last_status);
11713                 return ret;
11714         }
11715
11716         memset(&ets_query, 0, sizeof(ets_query));
11717         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
11718                                                    &ets_query, NULL);
11719         if (ret != I40E_SUCCESS) {
11720                 PMD_DRV_LOG(ERR,
11721                         "Failed to get switch_comp ETS configuration %u",
11722                         hw->aq.asq_last_status);
11723                 return ret;
11724         }
11725         memset(&bw_query, 0, sizeof(bw_query));
11726         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
11727                                                   &bw_query, NULL);
11728         if (ret != I40E_SUCCESS) {
11729                 PMD_DRV_LOG(ERR,
11730                         "Failed to get switch_comp bandwidth configuration %u",
11731                         hw->aq.asq_last_status);
11732                 return ret;
11733         }
11734
11735         /* store and print out BW info */
11736         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
11737         veb->bw_info.bw_max = ets_query.tc_bw_max;
11738         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
11739         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
11740         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
11741                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
11742                      I40E_16_BIT_WIDTH);
11743         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11744                 veb->bw_info.bw_ets_share_credits[i] =
11745                                 bw_query.tc_bw_share_credits[i];
11746                 veb->bw_info.bw_ets_credits[i] =
11747                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11748                 /* 4 bits per TC, 4th bit is reserved */
11749                 veb->bw_info.bw_ets_max[i] =
11750                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11751                                   RTE_LEN2MASK(3, uint8_t));
11752                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11753                             veb->bw_info.bw_ets_share_credits[i]);
11754                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11755                             veb->bw_info.bw_ets_credits[i]);
11756                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11757                             veb->bw_info.bw_ets_max[i]);
11758         }
11759
11760         veb->enabled_tc = tc_map;
11761
11762         return ret;
11763 }
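/*
 * Illustrative decode of the bw_max words above (not part of the driver):
 * the two 16-bit tc_bw_max words form one 32-bit value holding a 4-bit
 * field per TC, with the 4th bit of each field reserved. With
 * bw_max = 0x00003333, TC0-TC3 each get
 * bw_ets_max = (0x3333 >> (i * 4)) & 0x7 = 3, and TC4-TC7 get 0.
 */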
11764
11765
11766 /*
11767  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11768  * @vsi: VSI to be configured
11769  * @tc_map: enabled TC bitmap
11770  *
11771  * Returns 0 on success, negative value on failure
11772  */
11773 static enum i40e_status_code
11774 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11775 {
11776         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11777         struct i40e_vsi_context ctxt;
11778         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11779         enum i40e_status_code ret = I40E_SUCCESS;
11780         int i;
11781
11782         /* Nothing to do if the requested TC map matches the enabled TCs */
11783         if (vsi->enabled_tc == tc_map)
11784                 return ret;
11785
11786         /* configure tc bandwidth */
11787         memset(&bw_data, 0, sizeof(bw_data));
11788         bw_data.tc_valid_bits = tc_map;
11789         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11790         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11791                 if (tc_map & BIT_ULL(i))
11792                         bw_data.tc_bw_credits[i] = 1;
11793         }
11794         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11795         if (ret) {
11796                 PMD_INIT_LOG(ERR,
11797                         "AQ command Config VSI BW allocation per TC failed = %d",
11798                         hw->aq.asq_last_status);
11799                 goto out;
11800         }
11801         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11802                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11803
11804         /* Update Queue Pairs Mapping for currently enabled UPs */
11805         ctxt.seid = vsi->seid;
11806         ctxt.pf_num = hw->pf_id;
11807         ctxt.vf_num = 0;
11808         ctxt.uplink_seid = vsi->uplink_seid;
11809         ctxt.info = vsi->info;
11810         i40e_get_cap(hw);
11811         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11812         if (ret)
11813                 goto out;
11814
11815         /* Update the VSI after updating the VSI queue-mapping information */
11816         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11817         if (ret) {
11818                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11819                         hw->aq.asq_last_status);
11820                 goto out;
11821         }
11822         /* update the local VSI info with updated queue map */
11823         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11824                                         sizeof(vsi->info.tc_mapping));
11825         rte_memcpy(&vsi->info.queue_mapping,
11826                         &ctxt.info.queue_mapping,
11827                 sizeof(vsi->info.queue_mapping));
11828         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11829         vsi->info.valid_sections = 0;
11830
11831         /* query and update current VSI BW information */
11832         ret = i40e_vsi_get_bw_config(vsi);
11833         if (ret) {
11834                 PMD_INIT_LOG(ERR,
11835                          "Failed updating vsi bw info, err %s aq_err %s",
11836                          i40e_stat_str(hw, ret),
11837                          i40e_aq_str(hw, hw->aq.asq_last_status));
11838                 goto out;
11839         }
11840
11841         vsi->enabled_tc = tc_map;
11842
11843 out:
11844         return ret;
11845 }
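/*
 * Usage sketch (illustrative): i40e_dcb_hw_configure() below calls this
 * for the main VSI and each VMDQ VSI, e.g.
 *   ret = i40e_vsi_config_tc(main_vsi, 0x3);
 * which gives TC0 and TC1 one ETS credit each and then refreshes the VSI
 * queue mapping via i40e_vsi_update_queue_mapping().
 */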
11846
11847 /*
11848  * i40e_dcb_hw_configure - program the dcb setting to hw
11849  * @pf: pf the configuration is taken on
11850  * @new_cfg: new configuration
11851  * @tc_map: enabled TC bitmap
11852  *
11853  * Returns 0 on success, negative value on failure
11854  */
11855 static enum i40e_status_code
11856 i40e_dcb_hw_configure(struct i40e_pf *pf,
11857                       struct i40e_dcbx_config *new_cfg,
11858                       uint8_t tc_map)
11859 {
11860         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11861         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11862         struct i40e_vsi *main_vsi = pf->main_vsi;
11863         struct i40e_vsi_list *vsi_list;
11864         enum i40e_status_code ret;
11865         int i;
11866         uint32_t val;
11867
11868         /* Use the FW API only if FW >= v4.4 */
11869         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11870               (hw->aq.fw_maj_ver >= 5))) {
11871                 PMD_INIT_LOG(ERR,
11872                         "FW < v4.4, can not use FW LLDP API to configure DCB");
11873                 return I40E_ERR_FIRMWARE_API_VERSION;
11874         }
11875
11876         /* Check whether reconfiguration is needed */
11877         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11878                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
11879                 return I40E_SUCCESS;
11880         }
11881
11882         /* Copy the new config to the current config */
11883         *old_cfg = *new_cfg;
11884         old_cfg->etsrec = old_cfg->etscfg;
11885         ret = i40e_set_dcb_config(hw);
11886         if (ret) {
11887                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11888                          i40e_stat_str(hw, ret),
11889                          i40e_aq_str(hw, hw->aq.asq_last_status));
11890                 return ret;
11891         }
11892         /* Set receive arbiter to RR mode and ETS scheme by default */
11893         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11894                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11895                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11896                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11897                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11898                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11899                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11900                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11901                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11902                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11903                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11904                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11905                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11906         }
11907         /* get local mib to check whether it is configured correctly */
11908         /* IEEE mode */
11909         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11910         /* Get Local DCB Config */
11911         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11912                                      &hw->local_dcbx_config);
11913
11914         /* If a VEB has been created, update its TC configuration first */
11915         if (main_vsi->veb) {
11916                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11917                 if (ret)
11918                         PMD_INIT_LOG(WARNING,
11919                                  "Failed configuring TC for VEB seid=%d",
11920                                  main_vsi->veb->seid);
11921         }
11922         /* Update each VSI */
11923         i40e_vsi_config_tc(main_vsi, tc_map);
11924         if (main_vsi->veb) {
11925                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11926                         /* Besides the main VSI and VMDQ VSIs, only enable
11927                          * the default TC for other VSIs
11928                          */
11929                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11930                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11931                                                          tc_map);
11932                         else
11933                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11934                                                          I40E_DEFAULT_TCMAP);
11935                         if (ret)
11936                                 PMD_INIT_LOG(WARNING,
11937                                         "Failed configuring TC for VSI seid=%d",
11938                                         vsi_list->vsi->seid);
11939                         /* continue */
11940                 }
11941         }
11942         return I40E_SUCCESS;
11943 }
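/*
 * Illustrative RETSTCC value composed by the arbiter loop above (assuming
 * a TC bandwidth share of 34 percent):
 *   val = (34 << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) |
 *         (1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) |
 *         (1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
 * i.e. 34 percent BW share, UP-in-TC mode enabled, and the TC handled by
 * the ETS scheme.
 */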
11944
11945 /*
11946  * i40e_dcb_init_configure - initial dcb config
11947  * @dev: device being configured
11948  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11949  *
11950  * Returns 0 on success, negative value on failure
11951  */
11952 int
11953 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11954 {
11955         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11956         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11957         int i, ret = 0;
11958
11959         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11960                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11961                 return -ENOTSUP;
11962         }
11963
11964         /* DCB initialization:
11965          * Update DCB configuration from the Firmware and configure
11966          * LLDP MIB change event.
11967          */
11968         if (sw_dcb == TRUE) {
11969                 /* Stopping LLDP is necessary for DPDK, but doing so makes
11970                  * DCB init fail: i40e_init_dcb() can only initialize DCB
11971                  * successfully while the LLDP agent is running. Therefore
11972                  * start LLDP before DCB init and stop it again after
11973                  * initialization.
11974                  */
11975                 ret = i40e_aq_start_lldp(hw, true, NULL);
11976                 if (ret != I40E_SUCCESS)
11977                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11978
11979                 ret = i40e_init_dcb(hw, true);
11980                 /* If the LLDP agent is stopped, i40e_init_dcb() is
11981                  * expected to fail with an I40E_AQ_RC_EPERM adminq status.
11982                  * Otherwise, it should return success.
11983                  */
11984                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11985                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11986                         memset(&hw->local_dcbx_config, 0,
11987                                 sizeof(struct i40e_dcbx_config));
11988                         /* set dcb default configuration */
11989                         hw->local_dcbx_config.etscfg.willing = 0;
11990                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11991                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11992                         hw->local_dcbx_config.etscfg.tsatable[0] =
11993                                                 I40E_IEEE_TSA_ETS;
11994                         /* all UPs mapping to TC0 */
11995                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11996                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11997                         hw->local_dcbx_config.etsrec =
11998                                 hw->local_dcbx_config.etscfg;
11999                         hw->local_dcbx_config.pfc.willing = 0;
12000                         hw->local_dcbx_config.pfc.pfccap =
12001                                                 I40E_MAX_TRAFFIC_CLASS;
12002                         /* FW needs one App to configure HW */
12003                         hw->local_dcbx_config.numapps = 1;
12004                         hw->local_dcbx_config.app[0].selector =
12005                                                 I40E_APP_SEL_ETHTYPE;
12006                         hw->local_dcbx_config.app[0].priority = 3;
12007                         hw->local_dcbx_config.app[0].protocolid =
12008                                                 I40E_APP_PROTOID_FCOE;
12009                         ret = i40e_set_dcb_config(hw);
12010                         if (ret) {
12011                                 PMD_INIT_LOG(ERR,
12012                                         "default dcb config fails. err = %d, aq_err = %d.",
12013                                         ret, hw->aq.asq_last_status);
12014                                 return -ENOSYS;
12015                         }
12016                 } else {
12017                         PMD_INIT_LOG(ERR,
12018                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
12019                                 ret, hw->aq.asq_last_status);
12020                         return -ENOTSUP;
12021                 }
12022
12023                 if (i40e_need_stop_lldp(dev)) {
12024                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
12025                         if (ret != I40E_SUCCESS)
12026                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
12027                 }
12028         } else {
12029                 ret = i40e_aq_start_lldp(hw, true, NULL);
12030                 if (ret != I40E_SUCCESS)
12031                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
12032
12033                 ret = i40e_init_dcb(hw, true);
12034                 if (!ret) {
12035                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
12036                                 PMD_INIT_LOG(ERR,
12037                                         "HW doesn't support DCBX offload.");
12038                                 return -ENOTSUP;
12039                         }
12040                 } else {
12041                         PMD_INIT_LOG(ERR,
12042                                 "DCBX configuration failed, err = %d, aq_err = %d.",
12043                                 ret, hw->aq.asq_last_status);
12044                         return -ENOTSUP;
12045                 }
12046         }
12047         return 0;
12048 }
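/*
 * Usage sketch (illustrative): this is invoked during PF initialization,
 * e.g.
 *   ret = i40e_dcb_init_configure(dev, TRUE);
 * With sw_dcb == TRUE the FW LLDP agent is started only long enough for
 * i40e_init_dcb() to succeed, after which a single-TC default config
 * (100 percent BW on TC0, all UPs mapped to TC0) is programmed.
 */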
12049
12050 /*
12051  * i40e_dcb_setup - setup dcb related config
12052  * @dev: device being configured
12053  *
12054  * Returns 0 on success, negative value on failure
12055  */
12056 static int
12057 i40e_dcb_setup(struct rte_eth_dev *dev)
12058 {
12059         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12060         struct i40e_dcbx_config dcb_cfg;
12061         uint8_t tc_map = 0;
12062         int ret = 0;
12063
12064         if ((pf->flags & I40E_FLAG_DCB) == 0) {
12065                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
12066                 return -ENOTSUP;
12067         }
12068
12069         if (pf->vf_num != 0)
12070                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
12071
12072         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
12073         if (ret) {
12074                 PMD_INIT_LOG(ERR, "invalid dcb config");
12075                 return -EINVAL;
12076         }
12077         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
12078         if (ret) {
12079                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
12080                 return -ENOSYS;
12081         }
12082
12083         return 0;
12084 }
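/*
 * Call-path note (illustrative): i40e_dcb_setup() is reached when the
 * application requests a DCB RX mode, e.g.
 *   dev_conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
 *   dev_conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 * followed by rte_eth_dev_configure(), after which the parsed DCB config
 * is programmed through i40e_dcb_hw_configure().
 */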
12085
12086 static int
12087 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
12088                       struct rte_eth_dcb_info *dcb_info)
12089 {
12090         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12091         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12092         struct i40e_vsi *vsi = pf->main_vsi;
12093         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
12094         uint16_t bsf, tc_mapping;
12095         int i, j = 0;
12096
12097         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
12098                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
12099         else
12100                 dcb_info->nb_tcs = 1;
12101         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
12102                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
12103         for (i = 0; i < dcb_info->nb_tcs; i++)
12104                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
12105
12106         /* get queue mapping if vmdq is disabled */
12107         if (!pf->nb_cfg_vmdq_vsi) {
12108                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12109                         if (!(vsi->enabled_tc & (1 << i)))
12110                                 continue;
12111                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
12112                         dcb_info->tc_queue.tc_rxq[j][i].base =
12113                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12114                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12115                         dcb_info->tc_queue.tc_txq[j][i].base =
12116                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12117                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12118                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12119                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12120                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12121                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12122                 }
12123                 return 0;
12124         }
12125
12126         /* get queue mapping if vmdq is enabled */
12127         do {
12128                 vsi = pf->vmdq[j].vsi;
12129                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12130                         if (!(vsi->enabled_tc & (1 << i)))
12131                                 continue;
12132                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
12133                         dcb_info->tc_queue.tc_rxq[j][i].base =
12134                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12135                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12136                         dcb_info->tc_queue.tc_txq[j][i].base =
12137                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12138                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12139                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12140                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12141                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12142                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12143                 }
12144                 j++;
12145         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
12146         return 0;
12147 }
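/*
 * Illustrative decode (not part of the driver): continuing the earlier
 * queue-mapping example, tc_mapping =
 *   (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *   (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 * yields tc_rxq[0][1].base = 8 and tc_rxq[0][1].nb_queue = 1 << 3 = 8.
 */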
12148
12149 static int
12150 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
12151 {
12152         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12153         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12154         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12155         uint16_t msix_intr;
12156
12157         msix_intr = intr_handle->intr_vec[queue_id];
12158         if (msix_intr == I40E_MISC_VEC_ID)
12159                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12160                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
12161                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
12162                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12163         else
12164                 I40E_WRITE_REG(hw,
12165                                I40E_PFINT_DYN_CTLN(msix_intr -
12166                                                    I40E_RX_VEC_START),
12167                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
12168                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
12169                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12170
12171         I40E_WRITE_FLUSH(hw);
12172         rte_intr_ack(&pci_dev->intr_handle);
12173
12174         return 0;
12175 }
12176
12177 static int
12178 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
12179 {
12180         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12181         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12182         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12183         uint16_t msix_intr;
12184
12185         msix_intr = intr_handle->intr_vec[queue_id];
12186         if (msix_intr == I40E_MISC_VEC_ID)
12187                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12188                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12189         else
12190                 I40E_WRITE_REG(hw,
12191                                I40E_PFINT_DYN_CTLN(msix_intr -
12192                                                    I40E_RX_VEC_START),
12193                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12194         I40E_WRITE_FLUSH(hw);
12195
12196         return 0;
12197 }
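/*
 * Usage sketch (illustrative): these two handlers back the generic ethdev
 * RX interrupt API, e.g.
 *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *   ... wait for the interrupt event ...
 *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
 * Vector 0 (I40E_MISC_VEC_ID) is controlled through PFINT_DYN_CTL0; all
 * other queue vectors use PFINT_DYN_CTLN relative to I40E_RX_VEC_START.
 */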
12198
12199 /**
12200  * Check whether a register offset is valid for the given MAC type.
12201  * The following register ranges are valid for X722 only:
12202  * 0x2b800--0x2bb00
12203  * 0x38700--0x38a00
12204  * 0x3d800--0x3db00
12205  * 0x208e00--0x209000
12206  * 0x20be00--0x20c000
12207  * 0x263c00--0x264000
12208  * 0x265c00--0x266000
12209  */
12210 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
12211 {
12212         if ((type != I40E_MAC_X722) &&
12213             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
12214              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
12215              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
12216              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
12217              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
12218              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
12219              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
12220                 return 0;
12221         else
12222                 return 1;
12223 }
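/*
 * Example (illustrative): i40e_valid_regs(I40E_MAC_XL710, 0x2b800)
 * returns 0 because that range exists on X722 only, while
 * i40e_valid_regs(I40E_MAC_X722, 0x2b800) returns 1.
 */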
12224
12225 static int i40e_get_regs(struct rte_eth_dev *dev,
12226                          struct rte_dev_reg_info *regs)
12227 {
12228         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12229         uint32_t *ptr_data = regs->data;
12230         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
12231         const struct i40e_reg_info *reg_info;
12232
12233         if (ptr_data == NULL) {
12234                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
12235                 regs->width = sizeof(uint32_t);
12236                 return 0;
12237         }
12238
12239         /* The first few registers have to be read using AQ operations */
12240         reg_idx = 0;
12241         while (i40e_regs_adminq[reg_idx].name) {
12242                 reg_info = &i40e_regs_adminq[reg_idx++];
12243                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12244                         for (arr_idx2 = 0;
12245                                         arr_idx2 <= reg_info->count2;
12246                                         arr_idx2++) {
12247                                 reg_offset = arr_idx * reg_info->stride1 +
12248                                         arr_idx2 * reg_info->stride2;
12249                                 reg_offset += reg_info->base_addr;
12250                                 ptr_data[reg_offset >> 2] =
12251                                         i40e_read_rx_ctl(hw, reg_offset);
12252                         }
12253         }
12254
12255         /* The remaining registers can be read using primitives */
12256         reg_idx = 0;
12257         while (i40e_regs_others[reg_idx].name) {
12258                 reg_info = &i40e_regs_others[reg_idx++];
12259                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12260                         for (arr_idx2 = 0;
12261                                         arr_idx2 <= reg_info->count2;
12262                                         arr_idx2++) {
12263                                 reg_offset = arr_idx * reg_info->stride1 +
12264                                         arr_idx2 * reg_info->stride2;
12265                                 reg_offset += reg_info->base_addr;
12266                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
12267                                         ptr_data[reg_offset >> 2] = 0;
12268                                 else
12269                                         ptr_data[reg_offset >> 2] =
12270                                                 I40E_READ_REG(hw, reg_offset);
12271                         }
12272         }
12273
12274         return 0;
12275 }
12276
12277 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
12278 {
12279         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12280
12281         /* Convert word count to byte count */
12282         return hw->nvm.sr_size << 1;
12283 }
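/*
 * Example (illustrative): with hw->nvm.sr_size = 0x4000 16-bit words, the
 * reported EEPROM length is 0x4000 << 1 = 0x8000 bytes (32 KB).
 */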
12284
12285 static int i40e_get_eeprom(struct rte_eth_dev *dev,
12286                            struct rte_dev_eeprom_info *eeprom)
12287 {
12288         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12289         uint16_t *data = eeprom->data;
12290         uint16_t offset, length, cnt_words;
12291         int ret_code;
12292
12293         offset = eeprom->offset >> 1;
12294         length = eeprom->length >> 1;
12295         cnt_words = length;
12296
12297         if (offset > hw->nvm.sr_size ||
12298                 offset + length > hw->nvm.sr_size) {
12299                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
12300                 return -EINVAL;
12301         }
12302
12303         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
12304
12305         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
12306         if (ret_code != I40E_SUCCESS || cnt_words != length) {
12307                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
12308                 return -EIO;
12309         }
12310
12311         return 0;
12312 }
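/*
 * Note (illustrative): eeprom->offset and eeprom->length are given in
 * bytes, but the NVM is addressed in 16-bit words, hence the ">> 1"
 * conversions above; e.g. a byte offset of 0x100 reads from word 0x80.
 */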
12313
12314 static int i40e_get_module_info(struct rte_eth_dev *dev,
12315                                 struct rte_eth_dev_module_info *modinfo)
12316 {
12317         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12318         uint32_t sff8472_comp = 0;
12319         uint32_t sff8472_swap = 0;
12320         uint32_t sff8636_rev = 0;
12321         i40e_status status;
12322         uint32_t type = 0;
12323
12324         /* Check if firmware supports reading module EEPROM. */
12325         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
12326                 PMD_DRV_LOG(ERR,
12327                             "Module EEPROM memory read not supported. "
12328                             "Please update the NVM image.\n");
12329                 return -EINVAL;
12330         }
12331
12332         status = i40e_update_link_info(hw);
12333         if (status)
12334                 return -EIO;
12335
12336         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
12337                 PMD_DRV_LOG(ERR,
12338                             "Cannot read module EEPROM memory. "
12339                             "No module connected.\n");
12340                 return -EINVAL;
12341         }
12342
12343         type = hw->phy.link_info.module_type[0];
12344
12345         switch (type) {
12346         case I40E_MODULE_TYPE_SFP:
12347                 status = i40e_aq_get_phy_register(hw,
12348                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12349                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12350                                 I40E_MODULE_SFF_8472_COMP,
12351                                 &sff8472_comp, NULL);
12352                 if (status)
12353                         return -EIO;
12354
12355                 status = i40e_aq_get_phy_register(hw,
12356                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12357                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12358                                 I40E_MODULE_SFF_8472_SWAP,
12359                                 &sff8472_swap, NULL);
12360                 if (status)
12361                         return -EIO;
12362
12363                 /* Check if the module requires address swap to access
12364                  * the other EEPROM memory page.
12365                  */
12366                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
12367                         PMD_DRV_LOG(WARNING,
12368                                     "Module address swap to access "
12369                                     "page 0xA2 is not supported.\n");
12370                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12371                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12372                 } else if (sff8472_comp == 0x00) {
12373                         /* Module is not SFF-8472 compliant */
12374                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12375                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12376                 } else {
12377                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
12378                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
12379                 }
12380                 break;
12381         case I40E_MODULE_TYPE_QSFP_PLUS:
12382                 /* Read from memory page 0. */
12383                 status = i40e_aq_get_phy_register(hw,
12384                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12385                                 0, 1,
12386                                 I40E_MODULE_REVISION_ADDR,
12387                                 &sff8636_rev, NULL);
12388                 if (status)
12389                         return -EIO;
12390                 /* Determine revision compliance byte */
12391                 if (sff8636_rev > 0x02) {
12392                         /* Module is SFF-8636 compliant */
12393                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
12394                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12395                 } else {
12396                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
12397                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12398                 }
12399                 break;
12400         case I40E_MODULE_TYPE_QSFP28:
12401                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
12402                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12403                 break;
12404         default:
12405                 PMD_DRV_LOG(ERR, "Module type unrecognized\n");
12406                 return -EINVAL;
12407         }
12408         return 0;
12409 }
12410
12411 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
12412                                   struct rte_dev_eeprom_info *info)
12413 {
12414         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12415         bool is_sfp = false;
12416         i40e_status status;
12417         uint8_t *data;
12418         uint32_t value = 0;
12419         uint32_t i;
12420
12421         if (!info || !info->length || !info->data)
12422                 return -EINVAL;
12423
12424         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
12425                 is_sfp = true;
12426
12427         data = info->data;
12428         for (i = 0; i < info->length; i++) {
12429                 u32 offset = i + info->offset;
12430                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
12431
12432                 /* Check if we need to access the other memory page */
12433                 if (is_sfp) {
12434                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
12435                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
12436                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
12437                         }
12438                 } else {
12439                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
12440                                 /* Compute memory page number and offset. */
12441                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
12442                                 addr++;
12443                         }
12444                 }
12445                 status = i40e_aq_get_phy_register(hw,
12446                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12447                                 addr, 1, offset, &value, NULL);
12448                 if (status)
12449                         return -EIO;
12450                 data[i] = (uint8_t)value;
12451         }
12452         return 0;
12453 }
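/*
 * Worked example (illustrative): for an SFP module, a request at
 * info->offset = 300 exceeds RTE_ETH_MODULE_SFF_8079_LEN (256), so the
 * loop reads byte 300 - 256 = 44 from the second memory page at I2C
 * address I40E_I2C_EEPROM_DEV_ADDR2 (0xA2).
 */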
12454
12455 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
12456                                      struct rte_ether_addr *mac_addr)
12457 {
12458         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12459         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12460         struct i40e_vsi *vsi = pf->main_vsi;
12461         struct i40e_mac_filter_info mac_filter;
12462         struct i40e_mac_filter *f;
12463         int ret;
12464
12465         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
12466                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
12467                 return -EINVAL;
12468         }
12469
12470         TAILQ_FOREACH(f, &vsi->mac_list, next) {
12471                 if (rte_is_same_ether_addr(&pf->dev_addr,
12472                                                 &f->mac_info.mac_addr))
12473                         break;
12474         }
12475
12476         if (f == NULL) {
12477                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
12478                 return -EIO;
12479         }
12480
12481         mac_filter = f->mac_info;
12482         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
12483         if (ret != I40E_SUCCESS) {
12484                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
12485                 return -EIO;
12486         }
12487         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
12488         ret = i40e_vsi_add_mac(vsi, &mac_filter);
12489         if (ret != I40E_SUCCESS) {
12490                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
12491                 return -EIO;
12492         }
12493         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
12494
12495         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
12496                                         mac_addr->addr_bytes, NULL);
12497         if (ret != I40E_SUCCESS) {
12498                 PMD_DRV_LOG(ERR, "Failed to change mac");
12499                 return -EIO;
12500         }
12501
12502         return 0;
12503 }
12504
12505 static int
12506 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
12507 {
12508         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12509         struct rte_eth_dev_data *dev_data = pf->dev_data;
12510         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
12511         int ret = 0;
12512
12513         /* check if mtu is within the allowed range */
12514         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
12515                 return -EINVAL;
12516
12517         /* Setting the MTU is forbidden while the port is running */
12518         if (dev_data->dev_started) {
12519                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
12520                             dev_data->port_id);
12521                 return -EBUSY;
12522         }
12523
12524         if (frame_size > RTE_ETHER_MAX_LEN)
12525                 dev_data->dev_conf.rxmode.offloads |=
12526                         DEV_RX_OFFLOAD_JUMBO_FRAME;
12527         else
12528                 dev_data->dev_conf.rxmode.offloads &=
12529                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
12530
12531         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
12532
12533         return ret;
12534 }
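/*
 * Example (illustrative, assuming I40E_ETH_OVERHEAD covers the Ethernet
 * header, CRC and two VLAN tags): i40e_dev_mtu_set(dev, 9000) on a
 * stopped port gives frame_size = 9000 + I40E_ETH_OVERHEAD, which is
 * above RTE_ETHER_MAX_LEN (1518), so DEV_RX_OFFLOAD_JUMBO_FRAME is set
 * and max_rx_pkt_len is updated accordingly.
 */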
12535
12536 /* Restore ethertype filter */
12537 static void
12538 i40e_ethertype_filter_restore(struct i40e_pf *pf)
12539 {
12540         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12541         struct i40e_ethertype_filter_list
12542                 *ethertype_list = &pf->ethertype.ethertype_list;
12543         struct i40e_ethertype_filter *f;
12544         struct i40e_control_filter_stats stats;
12545         uint16_t flags;
12546
12547         TAILQ_FOREACH(f, ethertype_list, rules) {
12548                 flags = 0;
12549                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
12550                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
12551                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
12552                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
12553                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
12554
12555                 memset(&stats, 0, sizeof(stats));
12556                 i40e_aq_add_rem_control_packet_filter(hw,
12557                                             f->input.mac_addr.addr_bytes,
12558                                             f->input.ether_type,
12559                                             flags, pf->main_vsi->seid,
12560                                             f->queue, 1, &stats, NULL);
12561         }
12562         PMD_DRV_LOG(INFO, "Ethertype filter:"
12563                     " mac_etype_used = %u, etype_used = %u,"
12564                     " mac_etype_free = %u, etype_free = %u",
12565                     stats.mac_etype_used, stats.etype_used,
12566                     stats.mac_etype_free, stats.etype_free);
12567 }
12568
12569 /* Restore tunnel filter */
12570 static void
12571 i40e_tunnel_filter_restore(struct i40e_pf *pf)
12572 {
12573         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12574         struct i40e_vsi *vsi;
12575         struct i40e_pf_vf *vf;
12576         struct i40e_tunnel_filter_list
12577                 *tunnel_list = &pf->tunnel.tunnel_list;
12578         struct i40e_tunnel_filter *f;
12579         struct i40e_aqc_cloud_filters_element_bb cld_filter;
12580         bool big_buffer = 0;
12581
12582         TAILQ_FOREACH(f, tunnel_list, rules) {
12583                 if (!f->is_to_vf)
12584                         vsi = pf->main_vsi;
12585                 else {
12586                         vf = &pf->vfs[f->vf_id];
12587                         vsi = vf->vsi;
12588                 }
12589                 memset(&cld_filter, 0, sizeof(cld_filter));
12590                 rte_ether_addr_copy((struct rte_ether_addr *)
12591                                 &f->input.outer_mac,
12592                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
12593                 rte_ether_addr_copy((struct rte_ether_addr *)
12594                                 &f->input.inner_mac,
12595                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
12596                 cld_filter.element.inner_vlan = f->input.inner_vlan;
12597                 cld_filter.element.flags = f->input.flags;
12598                 cld_filter.element.tenant_id = f->input.tenant_id;
12599                 cld_filter.element.queue_number = f->queue;
12600                 rte_memcpy(cld_filter.general_fields,
12601                            f->input.general_fields,
12602                            sizeof(f->input.general_fields));
12603
12604                 if (((f->input.flags &
12605                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
12606                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
12607                     ((f->input.flags &
12608                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
12609                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
12610                     ((f->input.flags &
12611                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
12612                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
12613                         big_buffer = 1;
12614
12615                 if (big_buffer)
12616                         i40e_aq_add_cloud_filters_bb(hw,
12617                                         vsi->seid, &cld_filter, 1);
12618                 else
12619                         i40e_aq_add_cloud_filters(hw, vsi->seid,
12620                                                   &cld_filter.element, 1);
12621         }
12622 }
12623
12624 /* Restore RSS filter */
12625 static inline void
12626 i40e_rss_filter_restore(struct i40e_pf *pf)
12627 {
12628         struct i40e_rss_conf_list *list = &pf->rss_config_list;
12629         struct i40e_rss_filter *filter;
12630
12631         TAILQ_FOREACH(filter, list, next) {
12632                 i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
12633         }
12634 }
12635
12636 static void
12637 i40e_filter_restore(struct i40e_pf *pf)
12638 {
12639         i40e_ethertype_filter_restore(pf);
12640         i40e_tunnel_filter_restore(pf);
12641         i40e_fdir_filter_restore(pf);
12642         i40e_rss_filter_restore(pf);
12643 }
12644
12645 bool
12646 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
12647 {
12648         if (strcmp(dev->device->driver->name, drv->driver.name))
12649                 return false;
12650
12651         return true;
12652 }
12653
12654 bool
12655 is_i40e_supported(struct rte_eth_dev *dev)
12656 {
12657         return is_device_supported(dev, &rte_i40e_pmd);
12658 }
12659
12660 struct i40e_customized_pctype*
12661 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
12662 {
12663         int i;
12664
12665         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
12666                 if (pf->customized_pctype[i].index == index)
12667                         return &pf->customized_pctype[i];
12668         }
12669         return NULL;
12670 }
12671
12672 static int
12673 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
12674                               uint32_t pkg_size, uint32_t proto_num,
12675                               struct rte_pmd_i40e_proto_info *proto,
12676                               enum rte_pmd_i40e_package_op op)
12677 {
12678         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12679         uint32_t pctype_num;
12680         struct rte_pmd_i40e_ptype_info *pctype;
12681         uint32_t buff_size;
12682         struct i40e_customized_pctype *new_pctype = NULL;
12683         uint8_t proto_id;
12684         uint8_t pctype_value;
12685         char name[64];
12686         uint32_t i, j, n;
12687         int ret;
12688
12689         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12690             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12691                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12692                 return -1;
12693         }
12694
12695         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12696                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
12697                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
12698         if (ret) {
12699                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
12700                 return -1;
12701         }
12702         if (!pctype_num) {
12703                 PMD_DRV_LOG(INFO, "No new pctype added");
12704                 return -1;
12705         }
12706
12707         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
12708         pctype = rte_zmalloc("new_pctype", buff_size, 0);
12709         if (!pctype) {
12710                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12711                 return -1;
12712         }
12713         /* get information about new pctype list */
12714         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12715                                         (uint8_t *)pctype, buff_size,
12716                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
12717         if (ret) {
12718                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
12719                 rte_free(pctype);
12720                 return -1;
12721         }
12722
12723         /* Update customized pctype. */
12724         for (i = 0; i < pctype_num; i++) {
12725                 pctype_value = pctype[i].ptype_id;
12726                 memset(name, 0, sizeof(name));
12727                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12728                         proto_id = pctype[i].protocols[j];
12729                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12730                                 continue;
12731                         for (n = 0; n < proto_num; n++) {
12732                                 if (proto[n].proto_id != proto_id)
12733                                         continue;
12734                                 strlcat(name, proto[n].name, sizeof(name));
12735                                 strlcat(name, "_", sizeof(name));
12736                                 break;
12737                         }
12738                 }
12739                 name[strlen(name) - 1] = '\0';
12740                 PMD_DRV_LOG(INFO, "name = %s\n", name);
12741                 if (!strcmp(name, "GTPC"))
12742                         new_pctype =
12743                                 i40e_find_customized_pctype(pf,
12744                                                       I40E_CUSTOMIZED_GTPC);
12745                 else if (!strcmp(name, "GTPU_IPV4"))
12746                         new_pctype =
12747                                 i40e_find_customized_pctype(pf,
12748                                                    I40E_CUSTOMIZED_GTPU_IPV4);
12749                 else if (!strcmp(name, "GTPU_IPV6"))
12750                         new_pctype =
12751                                 i40e_find_customized_pctype(pf,
12752                                                    I40E_CUSTOMIZED_GTPU_IPV6);
12753                 else if (!strcmp(name, "GTPU"))
12754                         new_pctype =
12755                                 i40e_find_customized_pctype(pf,
12756                                                       I40E_CUSTOMIZED_GTPU);
12757                 else if (!strcmp(name, "IPV4_L2TPV3"))
12758                         new_pctype =
12759                                 i40e_find_customized_pctype(pf,
12760                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
12761                 else if (!strcmp(name, "IPV6_L2TPV3"))
12762                         new_pctype =
12763                                 i40e_find_customized_pctype(pf,
12764                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
12765                 else if (!strcmp(name, "IPV4_ESP"))
12766                         new_pctype =
12767                                 i40e_find_customized_pctype(pf,
12768                                                 I40E_CUSTOMIZED_ESP_IPV4);
12769                 else if (!strcmp(name, "IPV6_ESP"))
12770                         new_pctype =
12771                                 i40e_find_customized_pctype(pf,
12772                                                 I40E_CUSTOMIZED_ESP_IPV6);
12773                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12774                         new_pctype =
12775                                 i40e_find_customized_pctype(pf,
12776                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12777                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12778                         new_pctype =
12779                                 i40e_find_customized_pctype(pf,
12780                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12781                 else if (!strcmp(name, "IPV4_AH"))
12782                         new_pctype =
12783                                 i40e_find_customized_pctype(pf,
12784                                                 I40E_CUSTOMIZED_AH_IPV4);
12785                 else if (!strcmp(name, "IPV6_AH"))
12786                         new_pctype =
12787                                 i40e_find_customized_pctype(pf,
12788                                                 I40E_CUSTOMIZED_AH_IPV6);
12789                 if (new_pctype) {
12790                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12791                                 new_pctype->pctype = pctype_value;
12792                                 new_pctype->valid = true;
12793                         } else {
12794                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12795                                 new_pctype->valid = false;
12796                         }
12797                 }
12798         }
12799
12800         rte_free(pctype);
12801         return 0;
12802 }
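/* Usage sketch (illustrative, not part of the driver): once a DDP
 * profile has registered a customized pctype such as GTPU_IPV4 above,
 * an application can bind it to a software flow type through the
 * rte_pmd_i40e API. Values below are example placeholders.
 *
 *	struct rte_pmd_i40e_flow_type_mapping map = {
 *		.flow_type = 63,	// example: an unused sw flow type (0..63)
 *		.pctype = 1ULL << pctype_id, // pctype value parsed from profile
 *	};
 *	rte_pmd_i40e_flow_type_mapping_update(port_id, &map, 1, 0);
 */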
12803
12804 static int
12805 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12806                              uint32_t pkg_size, uint32_t proto_num,
12807                              struct rte_pmd_i40e_proto_info *proto,
12808                              enum rte_pmd_i40e_package_op op)
12809 {
12810         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12811         uint16_t port_id = dev->data->port_id;
12812         uint32_t ptype_num;
12813         struct rte_pmd_i40e_ptype_info *ptype;
12814         uint32_t buff_size;
12815         uint8_t proto_id;
12816         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12817         uint32_t i, j, n;
12818         bool in_tunnel;
12819         int ret;
12820
12821         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12822             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12823                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12824                 return -1;
12825         }
12826
12827         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12828                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12829                 return 0;
12830         }
12831
12832         /* get information about new ptype num */
12833         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12834                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12835                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12836         if (ret) {
12837                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12838                 return ret;
12839         }
12840         if (!ptype_num) {
12841                 PMD_DRV_LOG(INFO, "No new ptype added");
12842                 return -1;
12843         }
12844
12845         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12846         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12847         if (!ptype) {
12848                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12849                 return -1;
12850         }
12851
12852         /* get information about new ptype list */
12853         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12854                                         (uint8_t *)ptype, buff_size,
12855                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12856         if (ret) {
12857                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12858                 rte_free(ptype);
12859                 return ret;
12860         }
12861
12862         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12863         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12864         if (!ptype_mapping) {
12865                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12866                 rte_free(ptype);
12867                 return -1;
12868         }
12869
12870         /* Update ptype mapping table. */
12871         for (i = 0; i < ptype_num; i++) {
12872                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12873                 ptype_mapping[i].sw_ptype = 0;
12874                 in_tunnel = false;
12875                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12876                         proto_id = ptype[i].protocols[j];
12877                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12878                                 continue;
12879                         for (n = 0; n < proto_num; n++) {
12880                                 if (proto[n].proto_id != proto_id)
12881                                         continue;
12882                                 memset(name, 0, sizeof(name));
12883                                 strlcpy(name, proto[n].name, sizeof(name));
12884                                 PMD_DRV_LOG(INFO, "name = %s", name);
12885                                 if (!strncasecmp(name, "PPPOE", 5))
12886                                         ptype_mapping[i].sw_ptype |=
12887                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12888                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12889                                          !in_tunnel) {
12890                                         ptype_mapping[i].sw_ptype |=
12891                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12892                                         ptype_mapping[i].sw_ptype |=
12893                                                 RTE_PTYPE_L4_FRAG;
12894                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12895                                            in_tunnel) {
12896                                         ptype_mapping[i].sw_ptype |=
12897                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12898                                         ptype_mapping[i].sw_ptype |=
12899                                                 RTE_PTYPE_INNER_L4_FRAG;
12900                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12901                                         ptype_mapping[i].sw_ptype |=
12902                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12903                                         in_tunnel = true;
12904                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12905                                            !in_tunnel)
12906                                         ptype_mapping[i].sw_ptype |=
12907                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12908                                 else if (!strncasecmp(name, "IPV4", 4) &&
12909                                          in_tunnel)
12910                                         ptype_mapping[i].sw_ptype |=
12911                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12912                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12913                                          !in_tunnel) {
12914                                         ptype_mapping[i].sw_ptype |=
12915                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12916                                         ptype_mapping[i].sw_ptype |=
12917                                                 RTE_PTYPE_L4_FRAG;
12918                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12919                                            in_tunnel) {
12920                                         ptype_mapping[i].sw_ptype |=
12921                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12922                                         ptype_mapping[i].sw_ptype |=
12923                                                 RTE_PTYPE_INNER_L4_FRAG;
12924                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12925                                         ptype_mapping[i].sw_ptype |=
12926                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12927                                         in_tunnel = true;
12928                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12929                                            !in_tunnel)
12930                                         ptype_mapping[i].sw_ptype |=
12931                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12932                                 else if (!strncasecmp(name, "IPV6", 4) &&
12933                                          in_tunnel)
12934                                         ptype_mapping[i].sw_ptype |=
12935                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12936                                 else if (!strncasecmp(name, "UDP", 3) &&
12937                                          !in_tunnel)
12938                                         ptype_mapping[i].sw_ptype |=
12939                                                 RTE_PTYPE_L4_UDP;
12940                                 else if (!strncasecmp(name, "UDP", 3) &&
12941                                          in_tunnel)
12942                                         ptype_mapping[i].sw_ptype |=
12943                                                 RTE_PTYPE_INNER_L4_UDP;
12944                                 else if (!strncasecmp(name, "TCP", 3) &&
12945                                          !in_tunnel)
12946                                         ptype_mapping[i].sw_ptype |=
12947                                                 RTE_PTYPE_L4_TCP;
12948                                 else if (!strncasecmp(name, "TCP", 3) &&
12949                                          in_tunnel)
12950                                         ptype_mapping[i].sw_ptype |=
12951                                                 RTE_PTYPE_INNER_L4_TCP;
12952                                 else if (!strncasecmp(name, "SCTP", 4) &&
12953                                          !in_tunnel)
12954                                         ptype_mapping[i].sw_ptype |=
12955                                                 RTE_PTYPE_L4_SCTP;
12956                                 else if (!strncasecmp(name, "SCTP", 4) &&
12957                                          in_tunnel)
12958                                         ptype_mapping[i].sw_ptype |=
12959                                                 RTE_PTYPE_INNER_L4_SCTP;
12960                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12961                                           !strncasecmp(name, "ICMPV6", 6)) &&
12962                                          !in_tunnel)
12963                                         ptype_mapping[i].sw_ptype |=
12964                                                 RTE_PTYPE_L4_ICMP;
12965                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12966                                           !strncasecmp(name, "ICMPV6", 6)) &&
12967                                          in_tunnel)
12968                                         ptype_mapping[i].sw_ptype |=
12969                                                 RTE_PTYPE_INNER_L4_ICMP;
12970                                 else if (!strncasecmp(name, "GTPC", 4)) {
12971                                         ptype_mapping[i].sw_ptype |=
12972                                                 RTE_PTYPE_TUNNEL_GTPC;
12973                                         in_tunnel = true;
12974                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12975                                         ptype_mapping[i].sw_ptype |=
12976                                                 RTE_PTYPE_TUNNEL_GTPU;
12977                                         in_tunnel = true;
12978                                 } else if (!strncasecmp(name, "ESP", 3)) {
12979                                         ptype_mapping[i].sw_ptype |=
12980                                                 RTE_PTYPE_TUNNEL_ESP;
12981                                         in_tunnel = true;
12982                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12983                                         ptype_mapping[i].sw_ptype |=
12984                                                 RTE_PTYPE_TUNNEL_GRENAT;
12985                                         in_tunnel = true;
12986                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12987                                            !strncasecmp(name, "L2TPV2", 6) ||
12988                                            !strncasecmp(name, "L2TPV3", 6)) {
12989                                         ptype_mapping[i].sw_ptype |=
12990                                                 RTE_PTYPE_TUNNEL_L2TP;
12991                                         in_tunnel = true;
12992                                 }
12993
12994                                 break;
12995                         }
12996                 }
12997         }
12998
12999         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
13000                                                 ptype_num, 0);
13001         if (ret)
13002                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
13003
13004         rte_free(ptype_mapping);
13005         rte_free(ptype);
13006         return ret;
13007 }
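/* Usage sketch (illustrative): the table built above can also be
 * inspected or overridden from an application via the rte_pmd_i40e
 * ptype mapping API; the entry layout mirrors ptype_mapping[] above.
 * The hw_ptype value below is an example placeholder.
 *
 *	struct rte_pmd_i40e_ptype_mapping m = {
 *		.hw_ptype = 38,	// example hardware ptype id from the profile
 *		.sw_ptype = RTE_PTYPE_L2_ETHER |
 *			    RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 *			    RTE_PTYPE_L4_UDP,
 *	};
 *	rte_pmd_i40e_ptype_mapping_update(port_id, &m, 1, 0);
 *	// rte_pmd_i40e_ptype_mapping_reset(port_id) restores defaults,
 *	// as done for RTE_PMD_I40E_PKG_OP_WR_DEL above.
 */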
13008
13009 void
13010 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
13011                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
13012 {
13013         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
13014         uint32_t proto_num;
13015         struct rte_pmd_i40e_proto_info *proto;
13016         uint32_t buff_size;
13017         uint32_t i;
13018         int ret;
13019
13020         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
13021             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
13022                 PMD_DRV_LOG(ERR, "Unsupported operation.");
13023                 return;
13024         }
13025
13026         /* get information about protocol number */
13027         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
13028                                        (uint8_t *)&proto_num, sizeof(proto_num),
13029                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
13030         if (ret) {
13031                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
13032                 return;
13033         }
13034         if (!proto_num) {
13035                 PMD_DRV_LOG(INFO, "No new protocol added");
13036                 return;
13037         }
13038
13039         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
13040         proto = rte_zmalloc("new_proto", buff_size, 0);
13041         if (!proto) {
13042                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
13043                 return;
13044         }
13045
13046         /* get information about protocol list */
13047         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
13048                                         (uint8_t *)proto, buff_size,
13049                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
13050         if (ret) {
13051                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
13052                 rte_free(proto);
13053                 return;
13054         }
13055
13056         /* Check if GTP is supported. */
13057         for (i = 0; i < proto_num; i++) {
13058                 if (!strncmp(proto[i].name, "GTP", 3)) {
13059                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
13060                                 pf->gtp_support = true;
13061                         else
13062                                 pf->gtp_support = false;
13063                         break;
13064                 }
13065         }
13066
13067         /* Check if ESP is supported. */
13068         for (i = 0; i < proto_num; i++) {
13069                 if (!strncmp(proto[i].name, "ESP", 3)) {
13070                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
13071                                 pf->esp_support = true;
13072                         else
13073                                 pf->esp_support = false;
13074                         break;
13075                 }
13076         }
13077
13078         /* Update customized pctype info */
13079         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
13080                                             proto_num, proto, op);
13081         if (ret)
13082                 PMD_DRV_LOG(INFO, "No pctype is updated.");
13083
13084         /* Update customized ptype info */
13085         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
13086                                            proto_num, proto, op);
13087         if (ret)
13088                 PMD_DRV_LOG(INFO, "No ptype is updated.");
13089
13090         rte_free(proto);
13091 }
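/* Usage sketch (illustrative): this function runs as part of DDP
 * package processing; a typical application-side trigger is:
 *
 *	// buf/size hold a DDP profile read from disk by the application
 *	rte_pmd_i40e_process_ddp_package(port_id, buf, size,
 *					 RTE_PMD_I40E_PKG_OP_WR_ADD);
 *
 * which loads the profile to the NIC and then updates the customized
 * pctype/ptype info above; RTE_PMD_I40E_PKG_OP_WR_DEL removes it.
 */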
13092
13093 /* Create a QinQ cloud filter
13094  *
13095  * The Fortville NIC has limited resources for tunnel filters,
13096  * so we can only reuse existing filters.
13097  *
13098  * In step 1 we define which Field Vector fields can be used for
13099  * filter types.
13100  * As we do not have the inner tag defined as a field,
13101  * we have to define it first by reusing one of the L1 entries.
13102  *
13103  * In step 2 we replace one of the existing filter types with
13104  * a new one for QinQ.
13105  * As we are reusing L1 and replacing L2, some of the default filter
13106  * types will disappear, depending on which L1 and L2 entries we reuse.
13107  *
13108  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
13109  *
13110  * 1.   Create L1 filter of outer vlan (12b) which will be in use
13111  *              later when we define the cloud filter.
13112  *      a.      Valid_flags.replace_cloud = 0
13113  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
13114  *      c.      New_filter = 0x10
13115  *      d.      TR bit = 0xff (optional, not used here)
13116  *      e.      Buffer - 2 entries:
13117  *              i.      Byte 0 = 8 (outer vlan FV index).
13118  *                      Byte 1 = 0 (rsv)
13119  *                      Byte 2-3 = 0x0fff
13120  *              ii.     Byte 0 = 37 (inner vlan FV index).
13121  *                      Byte 1 = 0 (rsv)
13122  *                      Byte 2-3 = 0x0fff
13123  *
13124  * Step 2:
13125  * 2.   Create cloud filter using two L1 filter entries: stag and
13126  *              new filter (outer vlan + inner vlan)
13127  *      a.      Valid_flags.replace_cloud = 1
13128  *      b.      Old_filter = 1 (instead of outer IP)
13129  *      c.      New_filter = 0x10
13130  *      d.      Buffer - 2 entries:
13131  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
13132  *                      Byte 1-3 = 0 (rsv)
13133  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
13134  *                      Byte 9-11 = 0 (rsv)
13135  */
13136 static int
13137 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
13138 {
13139         int ret = -ENOTSUP;
13140         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
13141         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
13142         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13143         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
13144
13145         if (pf->support_multi_driver) {
13146                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
13147                 return ret;
13148         }
13149
13150         /* Init */
13151         memset(&filter_replace, 0,
13152                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13153         memset(&filter_replace_buf, 0,
13154                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13155
13156         /* create L1 filter */
13157         filter_replace.old_filter_type =
13158                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
13159         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13160         filter_replace.tr_bit = 0;
13161
13162         /* Prepare the buffer, 2 entries */
13163         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
13164         filter_replace_buf.data[0] |=
13165                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13166         /* Field Vector 12b mask */
13167         filter_replace_buf.data[2] = 0xff;
13168         filter_replace_buf.data[3] = 0x0f;
13169         filter_replace_buf.data[4] =
13170                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
13171         filter_replace_buf.data[4] |=
13172                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13173         /* Field Vector 12b mask */
13174         filter_replace_buf.data[6] = 0xff;
13175         filter_replace_buf.data[7] = 0x0f;
13176         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13177                         &filter_replace_buf);
13178         if (ret != I40E_SUCCESS)
13179                 return ret;
13180
13181         if (filter_replace.old_filter_type !=
13182             filter_replace.new_filter_type)
13183                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
13184                             " original: 0x%x, new: 0x%x",
13185                             dev->device->name,
13186                             filter_replace.old_filter_type,
13187                             filter_replace.new_filter_type);
13188
13189         /* Apply the second L2 cloud filter */
13190         memset(&filter_replace, 0,
13191                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13192         memset(&filter_replace_buf, 0,
13193                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13194
13195         /* create L2 filter; its input will be the L1 filter above */
13196         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
13197         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
13198         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13199
13200         /* Prepare the buffer, 2 entries */
13201         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
13202         filter_replace_buf.data[0] |=
13203                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13204         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13205         filter_replace_buf.data[4] |=
13206                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13207         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13208                         &filter_replace_buf);
13209         if (!ret && (filter_replace.old_filter_type !=
13210                      filter_replace.new_filter_type))
13211                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
13212                             " original: 0x%x, new: 0x%x",
13213                             dev->device->name,
13214                             filter_replace.old_filter_type,
13215                             filter_replace.new_filter_type);
13216
13217         return ret;
13218 }
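/* Usage sketch (illustrative): after the replacement above, a QinQ
 * cloud filter can be requested through rte_flow; in testpmd syntax
 * (all values are placeholders) this corresponds to something like:
 *
 *	flow create 0 ingress pattern eth / vlan tci is 0x10 /
 *		vlan tci is 0x20 / end actions vf id 1 / end
 *
 * i.e. matching on outer VLAN + inner VLAN, which is exactly the pair
 * of Field Vector words programmed into the L1 filter above.
 */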
13219
13220 int
13221 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
13222                    const struct rte_flow_action_rss *in)
13223 {
13224         if (in->key_len > RTE_DIM(out->key) ||
13225             in->queue_num > RTE_DIM(out->queue))
13226                 return -EINVAL;
13227         if (!in->key && in->key_len)
13228                 return -EINVAL;
13229         out->conf = (struct rte_flow_action_rss){
13230                 .func = in->func,
13231                 .level = in->level,
13232                 .types = in->types,
13233                 .key_len = in->key_len,
13234                 .queue_num = in->queue_num,
13235                 .queue = memcpy(out->queue, in->queue,
13236                                 sizeof(*in->queue) * in->queue_num),
13237         };
13238         if (in->key)
13239                 out->conf.key = memcpy(out->key, in->key, in->key_len);
13240         return 0;
13241 }
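/* Usage sketch (illustrative): i40e_rss_conf_init() deep-copies the
 * caller's key and queue arrays so the flow layer can keep the rule
 * after the rte_flow action memory goes away, e.g.:
 *
 *	uint16_t q[2] = {0, 1};
 *	struct rte_flow_action_rss in = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.types = ETH_RSS_NONFRAG_IPV4_TCP,
 *		.queue_num = 2,
 *		.queue = q,
 *	};
 *	struct i40e_rte_flow_rss_conf out = {0};
 *	i40e_rss_conf_init(&out, &in);
 *	// out.conf.queue now points at out.queue, not at q
 */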
13242
13243 /* Write the HENA registers to enable hash */
13244 static int
13245 i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
13246 {
13247         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13248         uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
13249         uint64_t hena;
13250         int ret;
13251
13252         ret = i40e_set_rss_key(pf->main_vsi, key,
13253                                rss_conf->conf.key_len);
13254         if (ret)
13255                 return ret;
13256
13257         hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
13258         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
13259         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
13260         I40E_WRITE_FLUSH(hw);
13261
13262         return 0;
13263 }
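/* Worked example (illustrative): the enabled-hash bitmap spans two
 * 32-bit registers. HENA(0) holds pctype bits 0-31 and HENA(1) bits
 * 32-63, so e.g. a 64-bit hena with only bit 33 set (an IPv4/TCP
 * pctype on this device family) is written as:
 *
 *	HENA(0) = (uint32_t)hena         = 0x00000000
 *	HENA(1) = (uint32_t)(hena >> 32) = 0x00000002
 */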
13264
13265 /* Configure hash input set */
13266 static int
13267 i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
13268 {
13269         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13270         struct rte_eth_input_set_conf conf;
13271         uint64_t mask0;
13272         int ret = 0;
13273         uint32_t j;
13274         int i;
13275         static const struct {
13276                 uint64_t type;
13277                 enum rte_eth_input_set_field field;
13278         } inset_match_table[] = {
13279                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
13280                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13281                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
13282                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13283                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
13284                         RTE_ETH_INPUT_SET_UNKNOWN},
13285                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
13286                         RTE_ETH_INPUT_SET_UNKNOWN},
13287
13288                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
13289                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13290                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
13291                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13292                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
13293                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13294                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
13295                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13296
13297                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
13298                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13299                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
13300                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13301                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
13302                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13303                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
13304                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13305
13306                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
13307                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13308                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
13309                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13310                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
13311                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13312                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
13313                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13314
13315                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
13316                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13317                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
13318                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13319                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
13320                         RTE_ETH_INPUT_SET_UNKNOWN},
13321                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
13322                         RTE_ETH_INPUT_SET_UNKNOWN},
13323
13324                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
13325                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13326                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
13327                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13328                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
13329                         RTE_ETH_INPUT_SET_UNKNOWN},
13330                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
13331                         RTE_ETH_INPUT_SET_UNKNOWN},
13332
13333                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
13334                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13335                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
13336                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13337                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
13338                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13339                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
13340                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13341
13342                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
13343                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13344                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
13345                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13346                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
13347                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13348                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
13349                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13350
13351                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
13352                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13353                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
13354                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13355                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
13356                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13357                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
13358                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13359
13360                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
13361                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13362                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
13363                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13364                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
13365                         RTE_ETH_INPUT_SET_UNKNOWN},
13366                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
13367                         RTE_ETH_INPUT_SET_UNKNOWN},
13368         };
13369
13370         mask0 = types & pf->adapter->flow_types_mask;
13371         conf.op = RTE_ETH_INPUT_SET_SELECT;
13372         conf.inset_size = 0;
13373         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
13374                 if (mask0 & (1ULL << i)) {
13375                         conf.flow_type = i;
13376                         break;
13377                 }
13378         }
13379
13380         for (j = 0; j < RTE_DIM(inset_match_table); j++) {
13381                 if ((types & inset_match_table[j].type) ==
13382                     inset_match_table[j].type) {
13383                         if (inset_match_table[j].field ==
13384                             RTE_ETH_INPUT_SET_UNKNOWN)
13385                                 return -EINVAL;
13386
13387                         conf.field[conf.inset_size] =
13388                                 inset_match_table[j].field;
13389                         conf.inset_size++;
13390                 }
13391         }
13392
13393         if (conf.inset_size) {
13394                 ret = i40e_hash_filter_inset_select(hw, &conf);
13395                 if (ret)
13396                         return ret;
13397         }
13398
13399         return ret;
13400 }
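/* Worked example (illustrative): for
 * types = ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY the loops
 * above produce:
 *
 *	conf.flow_type  = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	conf.field[0]   = RTE_ETH_INPUT_SET_L3_SRC_IP4;
 *	conf.inset_size = 1;
 *
 * while an unsupported pair such as ETH_RSS_FRAG_IPV4 |
 * ETH_RSS_L4_SRC_ONLY hits RTE_ETH_INPUT_SET_UNKNOWN and returns
 * -EINVAL.
 */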
13401
13402 /* Look up conflicting rules and mark them as invalid */
13403 static void
13404 i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
13405                 struct i40e_rte_flow_rss_conf *conf)
13406 {
13407         struct i40e_rss_filter *rss_item;
13408         uint64_t rss_inset;
13409
13410         /* Mask off the input-set bits before comparing hash types */
13411         rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
13412                 ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
13413
13414         /* Look up conflicting rules and mark them as invalid */
13415         TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
13416                 if (!rss_item->rss_filter_info.valid)
13417                         continue;
13418
13419                 if (conf->conf.queue_num &&
13420                     rss_item->rss_filter_info.conf.queue_num)
13421                         rss_item->rss_filter_info.valid = false;
13422
13423                 if (conf->conf.types &&
13424                     (rss_item->rss_filter_info.conf.types &
13425                     rss_inset) ==
13426                     (conf->conf.types & rss_inset))
13427                         rss_item->rss_filter_info.valid = false;
13428
13429                 if (conf->conf.func ==
13430                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
13431                     rss_item->rss_filter_info.conf.func ==
13432                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13433                         rss_item->rss_filter_info.valid = false;
13434         }
13435 }
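/* Worked example (illustrative): with rule A installed as
 * types = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY, adding
 * rule B with types = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY
 * marks A invalid: after masking with rss_inset both reduce to
 * ETH_RSS_NONFRAG_IPV4_TCP, so the later rule owns that pctype.
 */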
13436
13437 /* Configure RSS hash function */
13438 static int
13439 i40e_rss_config_hash_function(struct i40e_pf *pf,
13440                 struct i40e_rte_flow_rss_conf *conf)
13441 {
13442         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13443         uint32_t reg, i;
13444         uint64_t mask0;
13445         uint16_t j;
13446
13447         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13448                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13449                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
13450                         PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
13451                         I40E_WRITE_FLUSH(hw);
13452                         i40e_rss_mark_invalid_rule(pf, conf);
13453
13454                         return 0;
13455                 }
13456                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
13457
13458                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13459                 I40E_WRITE_FLUSH(hw);
13460                 i40e_rss_mark_invalid_rule(pf, conf);
13461         } else if (conf->conf.func ==
13462                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13463                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13464
13465                 i40e_set_symmetric_hash_enable_per_port(hw, 1);
13466                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13467                         if (mask0 & (1ULL << i))
13468                                 break;
13469                 }
13470
13471                 if (i == UINT64_BIT)
13472                         return -EINVAL;
13473
13474                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13475                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13476                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13477                                 i40e_write_global_rx_ctl(hw,
13478                                         I40E_GLQF_HSYM(j),
13479                                         I40E_GLQF_HSYM_SYMH_ENA_MASK);
13480                 }
13481         }
13482
13483         return 0;
13484 }
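/* Register-level summary (illustrative) of the function above:
 *
 *	GLQF_CTL.HTOEP = 1 -> Toeplitz, 0 -> simple XOR (global setting);
 *	GLQF_HSYM[pctype].SYMH_ENA = 1 -> symmetric hash for that pctype.
 *
 * Simple XOR therefore applies port-wide, while symmetric Toeplitz is
 * enabled per pctype derived from the requested flow types.
 */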
13485
13486 /* Enable RSS according to the configuration */
13487 static int
13488 i40e_rss_enable_hash(struct i40e_pf *pf,
13489                 struct i40e_rte_flow_rss_conf *conf)
13490 {
13491         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13492         struct i40e_rte_flow_rss_conf rss_conf;
13493
13494         if (!(conf->conf.types & pf->adapter->flow_types_mask))
13495                 return -ENOTSUP;
13496
13497         memset(&rss_conf, 0, sizeof(rss_conf));
13498         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13499
13500         /* Configure hash input set */
13501         if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
13502                 return -EINVAL;
13503
13504         if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
13505             (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
13506                 /* Random default keys */
13507                 static uint32_t rss_key_default[] = {0x6b793944,
13508                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
13509                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
13510                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
13511
13512                 rss_conf.conf.key = (uint8_t *)rss_key_default;
13513                 rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
13514                                 sizeof(uint32_t);
13515                 PMD_DRV_LOG(INFO,
13516                         "No valid RSS key config for i40e, using default");
13517         }
13518
13519         rss_conf.conf.types |= rss_info->conf.types;
13520         i40e_rss_hash_set(pf, &rss_conf);
13521
13522         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
13523                 i40e_rss_config_hash_function(pf, conf);
13524
13525         i40e_rss_mark_invalid_rule(pf, conf);
13526
13527         return 0;
13528 }
13529
13530 /* Configure RSS queue region */
13531 static int
13532 i40e_rss_config_queue_region(struct i40e_pf *pf,
13533                 struct i40e_rte_flow_rss_conf *conf)
13534 {
13535         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13536         uint32_t lut = 0;
13537         uint16_t j, num;
13538         uint32_t i;
13539
13540         /* If both VMDQ and RSS are enabled, not all PF queues are used.
13541          * Calculate the number of PF queues actually configured for RSS.
13542          */
13543         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13544                 num = i40e_pf_calc_configured_queues_num(pf);
13545         else
13546                 num = pf->dev_data->nb_rx_queues;
13547
13548         num = RTE_MIN(num, conf->conf.queue_num);
13549         PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
13550                         num);
13551
13552         if (num == 0) {
13553                 PMD_DRV_LOG(ERR,
13554                         "No PF queues are configured to enable RSS for port %u",
13555                         pf->dev_data->port_id);
13556                 return -ENOTSUP;
13557         }
13558
13559         /* Fill in redirection table */
13560         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13561                 if (j == num)
13562                         j = 0;
13563                 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
13564                         hw->func_caps.rss_table_entry_width) - 1));
13565                 if ((i & 3) == 3)
13566                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13567         }
13568
13569         i40e_rss_mark_invalid_rule(pf, conf);
13570
13571         return 0;
13572 }
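/* Worked example (illustrative): four LUT entries are packed per
 * 32-bit HLUT register. With conf->conf.queue = {4, 5, 6, 7} and
 * num = 4, after i = 3 the accumulated value is
 *
 *	lut = (4 << 24) | (5 << 16) | (6 << 8) | 7 = 0x04050607
 *
 * which is written to I40E_PFQF_HLUT(0); the pattern then repeats
 * across the whole rss_table_size redirection table.
 */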
13573
13574 /* Configure RSS hash function to default */
13575 static int
13576 i40e_rss_clear_hash_function(struct i40e_pf *pf,
13577                 struct i40e_rte_flow_rss_conf *conf)
13578 {
13579         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13580         uint32_t i, reg;
13581         uint64_t mask0;
13582         uint16_t j;
13583
13584         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13585                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13586                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
13587                         PMD_DRV_LOG(DEBUG,
13588                                 "Hash function already set to Toeplitz");
13589                         I40E_WRITE_FLUSH(hw);
13590
13591                         return 0;
13592                 }
13593                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
13594
13595                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13596                 I40E_WRITE_FLUSH(hw);
13597         } else if (conf->conf.func ==
13598                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13599                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13600
13601                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13602                         if (mask0 & (1ULL << i))
13603                                 break;
13604                 }
13605
13606                 if (i == UINT64_BIT)
13607                         return -EINVAL;
13608
13609                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13610                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13611                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13612                                 i40e_write_global_rx_ctl(hw,
13613                                         I40E_GLQF_HSYM(j),
13614                                         0);
13615                 }
13616         }
13617
13618         return 0;
13619 }
13620
13621 /* Disable RSS hash and configure default input set */
13622 static int
13623 i40e_rss_disable_hash(struct i40e_pf *pf,
13624                 struct i40e_rte_flow_rss_conf *conf)
13625 {
13626         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13627         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13628         struct i40e_rte_flow_rss_conf rss_conf;
13629         uint32_t i;
13630
13631         memset(&rss_conf, 0, sizeof(rss_conf));
13632         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13633
13634         /* Disable RSS hash */
13635         rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
13636         i40e_rss_hash_set(pf, &rss_conf);
13637
13638         for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
13639                 if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
13640                     !(conf->conf.types & (1ULL << i)))
13641                         continue;
13642
13643                 /* Configure default input set */
13644                 struct rte_eth_input_set_conf input_conf = {
13645                         .op = RTE_ETH_INPUT_SET_SELECT,
13646                         .flow_type = i,
13647                         .inset_size = 1,
13648                 };
13649                 input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
13650                 i40e_hash_filter_inset_select(hw, &input_conf);
13651         }
13652
13653         rss_info->conf.types = rss_conf.conf.types;
13654
13655         i40e_rss_clear_hash_function(pf, conf);
13656
13657         return 0;
13658 }
13659
13660 /* Configure RSS queue region to default */
13661 static int
13662 i40e_rss_clear_queue_region(struct i40e_pf *pf)
13663 {
13664         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13665         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13666         uint16_t queue[I40E_MAX_Q_PER_TC];
13667         uint32_t num_rxq, i;
13668         uint32_t lut = 0;
13669         uint16_t j, num;
13670
13671         num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
13672
13673         for (j = 0; j < num_rxq; j++)
13674                 queue[j] = j;
13675
13676         /* If both VMDQ and RSS are enabled, not all PF queues are used.
13677          * Calculate the number of PF queues actually configured for RSS.
13678          */
13679         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13680                 num = i40e_pf_calc_configured_queues_num(pf);
13681         else
13682                 num = pf->dev_data->nb_rx_queues;
13683
13684         num = RTE_MIN(num, num_rxq);
13685         PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
13686                         num);
13687
13688         if (num == 0) {
13689                 PMD_DRV_LOG(ERR,
13690                         "No PF queues are configured to enable RSS for port %u",
13691                         pf->dev_data->port_id);
13692                 return -ENOTSUP;
13693         }
13694
13695         /* Fill in redirection table */
13696         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13697                 if (j == num)
13698                         j = 0;
13699                 lut = (lut << 8) | (queue[j] & ((0x1 <<
13700                         hw->func_caps.rss_table_entry_width) - 1));
13701                 if ((i & 3) == 3)
13702                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13703         }
13704
13705         rss_info->conf.queue_num = 0;
13706         rss_info->conf.queue = NULL;
13707
13708         return 0;
13709 }
13710
13711 int
13712 i40e_config_rss_filter(struct i40e_pf *pf,
13713                 struct i40e_rte_flow_rss_conf *conf, bool add)
13714 {
13715         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13716         struct rte_flow_action_rss update_conf = rss_info->conf;
13717         int ret = 0;
13718
13719         if (add) {
13720                 if (conf->conf.queue_num) {
13721                         /* Configure RSS queue region */
13722                         ret = i40e_rss_config_queue_region(pf, conf);
13723                         if (ret)
13724                                 return ret;
13725
13726                         update_conf.queue_num = conf->conf.queue_num;
13727                         update_conf.queue = conf->conf.queue;
13728                 } else if (conf->conf.func ==
13729                            RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13730                         /* Configure hash function */
13731                         ret = i40e_rss_config_hash_function(pf, conf);
13732                         if (ret)
13733                                 return ret;
13734
13735                         update_conf.func = conf->conf.func;
13736                 } else {
13737                         /* Configure hash enable and input set */
13738                         ret = i40e_rss_enable_hash(pf, conf);
13739                         if (ret)
13740                                 return ret;
13741
13742                         update_conf.types |= conf->conf.types;
13743                         update_conf.key = conf->conf.key;
13744                         update_conf.key_len = conf->conf.key_len;
13745                 }
13746
13747                 /* Update RSS info in pf */
13748                 if (i40e_rss_conf_init(rss_info, &update_conf))
13749                         return -EINVAL;
13750         } else {
13751                 if (!conf->valid)
13752                         return 0;
13753
13754                 if (conf->conf.queue_num)
13755                         i40e_rss_clear_queue_region(pf);
13756                 else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13757                         i40e_rss_clear_hash_function(pf, conf);
13758                 else
13759                         i40e_rss_disable_hash(pf, conf);
13760         }
13761
13762         return 0;
13763 }
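/* Usage sketch (illustrative): the flow layer calls this entry point
 * for RSS actions; the three add-paths above are selected by the
 * action contents, e.g. a queue-region rule:
 *
 *	struct i40e_rte_flow_rss_conf conf = {0};
 *	uint16_t q[4] = {0, 1, 2, 3};
 *	struct rte_flow_action_rss in = {.queue_num = 4, .queue = q};
 *
 *	i40e_rss_conf_init(&conf, &in);
 *	i40e_config_rss_filter(pf, &conf, true); // queue-region path
 *
 * while add == false walks the matching clear/disable helpers for
 * rules previously marked valid.
 */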
13764
13765 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
13766 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
13767 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
13768 RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
13769 #endif
13770 #ifdef RTE_LIBRTE_I40E_DEBUG_TX
13771 RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
13772 #endif
13773 #ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
13774 RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG);
13775 #endif
13776
13777 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
13778                               ETH_I40E_FLOATING_VEB_ARG "=1"
13779                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
13780                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
13781                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
13782                               ETH_I40E_USE_LATEST_VEC "=0|1");
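/* Usage sketch (illustrative): the devargs registered above are passed
 * per device at EAL start-up, e.g. (placeholder PCI address):
 *
 *	testpmd -w 0000:02:00.0,support-multi-driver=1,queue-num-per-vf=4 -- -i
 *
 * (-w is the allow/whitelist option of this DPDK era; newer releases
 * spell it -a.)
 */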