217a7bbbd867a61cdf6d070be00daa621fb25d89
[dpdk.git] drivers/net/i40e/i40e_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_tailq.h>
28 #include <rte_hash_crc.h>
29 #include <rte_bitmap.h>
30
31 #include "i40e_logs.h"
32 #include "base/i40e_prototype.h"
33 #include "base/i40e_adminq_cmd.h"
34 #include "base/i40e_type.h"
35 #include "base/i40e_register.h"
36 #include "base/i40e_dcb.h"
37 #include "i40e_ethdev.h"
38 #include "i40e_rxtx.h"
39 #include "i40e_pf.h"
40 #include "i40e_regs.h"
41 #include "rte_pmd_i40e.h"
42
43 #define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
44 #define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
45 #define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
46 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
47 #define ETH_I40E_USE_LATEST_VEC         "use-latest-supported-vec"
48 #define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"
49
50 #define I40E_CLEAR_PXE_WAIT_MS     200
51 #define I40E_VSI_TSR_QINQ_STRIP         0x4010
52 #define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))
53
54 /* Maximum number of capability elements */
55 #define I40E_MAX_CAP_ELE_NUM       128
56
57 /* Wait count and interval */
58 #define I40E_CHK_Q_ENA_COUNT       1000
59 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
60
61 /* Maximum number of VSIs */
62 #define I40E_MAX_NUM_VSIS          (384UL)
63
64 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
65
66 /* Flow control default timer */
67 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
68
69 /* Flow control enable fwd bit */
70 #define I40E_PRTMAC_FWD_CTRL   0x00000001
71
72 /* Receive Packet Buffer size */
73 #define I40E_RXPBSIZE (968 * 1024)
74
75 /* Kilobytes shift */
76 #define I40E_KILOSHIFT 10
77
78 /* Flow control default high water */
79 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
80
81 /* Flow control default low water */
82 #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
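/*
 * Editor's note: a quick sanity check on the defaults above. 0xF2000
 * bytes = 991232 = 968 * 1024, i.e. exactly I40E_RXPBSIZE, so both
 * water marks default to the full Rx packet buffer expressed in KB:
 * 0xF2000 >> I40E_KILOSHIFT = 968 KB.
 */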
83
84 /* Receive Average Packet Size in Bytes */
85 #define I40E_PACKET_AVERAGE_SIZE 128
86
87 /* Mask of PF interrupt causes */
88 #define I40E_PFINT_ICR0_ENA_MASK ( \
89                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
90                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
91                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
92                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
93                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
94                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
95                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
96                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
97                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
98
99 #define I40E_FLOW_TYPES ( \
100         (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
101         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
102         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
103         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
104         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
105         (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
106         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
107         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
108         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
109         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
110         (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
111
112 /* Additional timesync values. */
113 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
114 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
115 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
116 #define I40E_PRTTSYN_TSYNENA     0x80000000
117 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
118 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
119
120 /**
121  * The values below are for writing registers that are not exposed,
122  * as suggested by silicon experts.
123  */
124 /* Destination MAC address */
125 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
126 /* Source MAC address */
127 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
128 /* Outer (S-Tag) VLAN tag in the outer L2 header */
129 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
130 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
131 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
132 /* Single VLAN tag in the inner L2 header */
133 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
134 /* Source IPv4 address */
135 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
136 /* Destination IPv4 address */
137 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
138 /* Source IPv4 address for X722 */
139 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
140 /* Destination IPv4 address for X722 */
141 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
142 /* IPv4 Protocol for X722 */
143 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
144 /* IPv4 Time to Live for X722 */
145 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
146 /* IPv4 Type of Service (TOS) */
147 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
148 /* IPv4 Protocol */
149 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
150 /* IPv4 Time to Live */
151 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
152 /* Source IPv6 address */
153 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
154 /* Destination IPv6 address */
155 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
156 /* IPv6 Traffic Class (TC) */
157 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
158 /* IPv6 Next Header */
159 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
160 /* IPv6 Hop Limit */
161 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
162 /* Source L4 port */
163 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
164 /* Destination L4 port */
165 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
166 /* SCTP verification tag */
167 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
168 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
169 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
170 /* Source port of tunneling UDP */
171 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
172 /* Destination port of tunneling UDP */
173 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
174 /* UDP Tunneling ID, NVGRE/GRE key */
175 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
176 /* Last ether type */
177 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
178 /* Tunneling outer destination IPv4 address */
179 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
180 /* Tunneling outer destination IPv6 address */
181 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
182 /* 1st word of flex payload */
183 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
184 /* 2nd word of flex payload */
185 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
186 /* 3rd word of flex payload */
187 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
188 /* 4th word of flex payload */
189 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
190 /* 5th word of flex payload */
191 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
192 /* 6th word of flex payload */
193 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
194 /* 7th word of flex payload */
195 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
196 /* 8th word of flex payload */
197 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
198 /* all 8 words of flex payload */
199 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
200 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
201
202 #define I40E_TRANSLATE_INSET 0
203 #define I40E_TRANSLATE_REG   1
204
205 #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
206 #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
207 #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
208 #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
209 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
210 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
211
212 /* PCI offset for querying capability */
213 #define PCI_DEV_CAP_REG            0xA4
214 /* PCI offset for enabling/disabling Extended Tag */
215 #define PCI_DEV_CTRL_REG           0xA8
216 /* Bit mask of Extended Tag capability */
217 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
218 /* Bit shift of Extended Tag enable/disable */
219 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
220 /* Bit mask of Extended Tag enable/disable */
221 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
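/*
 * Editor's note: a minimal sketch of how the capability/control offsets
 * above can be used to turn Extended Tag on through the PCI config
 * space helpers from rte_bus_pci.h. Hypothetical helper for
 * illustration only, not the driver's own routine.
 */
static inline void
i40e_sketch_enable_extended_tag(struct rte_pci_device *pci_dev)
{
	uint32_t buf = 0;

	/* Bail out unless the device advertises the Extended Tag capability */
	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CAP_REG) < 0 ||
	    !(buf & PCI_DEV_CAP_EXT_TAG_MASK))
		return;

	/* Read-modify-write the enable bit in the device control register */
	buf = 0;
	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CTRL_REG) < 0)
		return;
	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
	rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
}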
222
223 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
224 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
225 static int i40e_dev_configure(struct rte_eth_dev *dev);
226 static int i40e_dev_start(struct rte_eth_dev *dev);
227 static int i40e_dev_stop(struct rte_eth_dev *dev);
228 static int i40e_dev_close(struct rte_eth_dev *dev);
229 static int i40e_dev_reset(struct rte_eth_dev *dev);
230 static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
231 static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
232 static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
233 static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
234 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
235 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
236 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
237                                struct rte_eth_stats *stats);
238 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
239                                struct rte_eth_xstat *xstats, unsigned n);
240 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
241                                      struct rte_eth_xstat_name *xstats_names,
242                                      unsigned limit);
243 static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
244 static int i40e_fw_version_get(struct rte_eth_dev *dev,
245                                 char *fw_version, size_t fw_size);
246 static int i40e_dev_info_get(struct rte_eth_dev *dev,
247                              struct rte_eth_dev_info *dev_info);
248 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
249                                 uint16_t vlan_id,
250                                 int on);
251 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
252                               enum rte_vlan_type vlan_type,
253                               uint16_t tpid);
254 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
255 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
256                                       uint16_t queue,
257                                       int on);
258 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
259 static int i40e_dev_led_on(struct rte_eth_dev *dev);
260 static int i40e_dev_led_off(struct rte_eth_dev *dev);
261 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
262                               struct rte_eth_fc_conf *fc_conf);
263 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
264                               struct rte_eth_fc_conf *fc_conf);
265 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
266                                        struct rte_eth_pfc_conf *pfc_conf);
267 static int i40e_macaddr_add(struct rte_eth_dev *dev,
268                             struct rte_ether_addr *mac_addr,
269                             uint32_t index,
270                             uint32_t pool);
271 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
272 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
273                                     struct rte_eth_rss_reta_entry64 *reta_conf,
274                                     uint16_t reta_size);
275 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
276                                    struct rte_eth_rss_reta_entry64 *reta_conf,
277                                    uint16_t reta_size);
278
279 static int i40e_get_cap(struct i40e_hw *hw);
280 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
281 static int i40e_pf_setup(struct i40e_pf *pf);
282 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
283 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
284 static int i40e_dcb_setup(struct rte_eth_dev *dev);
285 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
286                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
287 static void i40e_stat_update_48(struct i40e_hw *hw,
288                                uint32_t hireg,
289                                uint32_t loreg,
290                                bool offset_loaded,
291                                uint64_t *offset,
292                                uint64_t *stat);
293 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
294 static void i40e_dev_interrupt_handler(void *param);
295 static void i40e_dev_alarm_handler(void *param);
296 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
297                                 uint32_t base, uint32_t num);
298 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
299 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
300                         uint32_t base);
301 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
302                         uint16_t num);
303 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
304 static int i40e_veb_release(struct i40e_veb *veb);
305 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
306                                                 struct i40e_vsi *vsi);
307 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
308 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
309                                              struct i40e_macvlan_filter *mv_f,
310                                              int num,
311                                              uint16_t vlan);
312 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
313 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
314                                     struct rte_eth_rss_conf *rss_conf);
315 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
316                                       struct rte_eth_rss_conf *rss_conf);
317 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
318                                         struct rte_eth_udp_tunnel *udp_tunnel);
319 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
320                                         struct rte_eth_udp_tunnel *udp_tunnel);
321 static void i40e_filter_input_set_init(struct i40e_pf *pf);
322 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
323                                 enum rte_filter_op filter_op,
324                                 void *arg);
325 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
326                                 enum rte_filter_type filter_type,
327                                 enum rte_filter_op filter_op,
328                                 void *arg);
329 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
330                                   struct rte_eth_dcb_info *dcb_info);
331 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
332 static void i40e_configure_registers(struct i40e_hw *hw);
333 static void i40e_hw_init(struct rte_eth_dev *dev);
334 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
335 static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
336                                                      uint16_t seid,
337                                                      uint16_t rule_type,
338                                                      uint16_t *entries,
339                                                      uint16_t count,
340                                                      uint16_t rule_id);
341 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
342                         struct rte_eth_mirror_conf *mirror_conf,
343                         uint8_t sw_id, uint8_t on);
344 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
345
346 static int i40e_timesync_enable(struct rte_eth_dev *dev);
347 static int i40e_timesync_disable(struct rte_eth_dev *dev);
348 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
349                                            struct timespec *timestamp,
350                                            uint32_t flags);
351 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
352                                            struct timespec *timestamp);
353 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
354
355 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
356
357 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
358                                    struct timespec *timestamp);
359 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
360                                     const struct timespec *timestamp);
361
362 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
363                                          uint16_t queue_id);
364 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
365                                           uint16_t queue_id);
366
367 static int i40e_get_regs(struct rte_eth_dev *dev,
368                          struct rte_dev_reg_info *regs);
369
370 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
371
372 static int i40e_get_eeprom(struct rte_eth_dev *dev,
373                            struct rte_dev_eeprom_info *eeprom);
374
375 static int i40e_get_module_info(struct rte_eth_dev *dev,
376                                 struct rte_eth_dev_module_info *modinfo);
377 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
378                                   struct rte_dev_eeprom_info *info);
379
380 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
381                                       struct rte_ether_addr *mac_addr);
382
383 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
384
385 static int i40e_ethertype_filter_convert(
386         const struct rte_eth_ethertype_filter *input,
387         struct i40e_ethertype_filter *filter);
388 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
389                                    struct i40e_ethertype_filter *filter);
390
391 static int i40e_tunnel_filter_convert(
392         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
393         struct i40e_tunnel_filter *tunnel_filter);
394 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
395                                 struct i40e_tunnel_filter *tunnel_filter);
396 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
397
398 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
399 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
400 static void i40e_filter_restore(struct i40e_pf *pf);
401 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
402 static int i40e_pf_config_rss(struct i40e_pf *pf);
403
404 static const char *const valid_keys[] = {
405         ETH_I40E_FLOATING_VEB_ARG,
406         ETH_I40E_FLOATING_VEB_LIST_ARG,
407         ETH_I40E_SUPPORT_MULTI_DRIVER,
408         ETH_I40E_QUEUE_NUM_PER_VF_ARG,
409         ETH_I40E_USE_LATEST_VEC,
410         ETH_I40E_VF_MSG_CFG,
411         NULL};
412
413 static const struct rte_pci_id pci_id_i40e_map[] = {
414         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
415         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
416         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
417         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
418         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
419         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
420         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
421         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
422         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
423         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
424         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
425         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
426         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
427         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
428         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
429         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
430         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
431         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
432         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
433         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
434         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
435         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
436         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
437         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
438         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
439         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
440         { .vendor_id = 0, /* sentinel */ },
441 };
442
443 static const struct eth_dev_ops i40e_eth_dev_ops = {
444         .dev_configure                = i40e_dev_configure,
445         .dev_start                    = i40e_dev_start,
446         .dev_stop                     = i40e_dev_stop,
447         .dev_close                    = i40e_dev_close,
448         .dev_reset                    = i40e_dev_reset,
449         .promiscuous_enable           = i40e_dev_promiscuous_enable,
450         .promiscuous_disable          = i40e_dev_promiscuous_disable,
451         .allmulticast_enable          = i40e_dev_allmulticast_enable,
452         .allmulticast_disable         = i40e_dev_allmulticast_disable,
453         .dev_set_link_up              = i40e_dev_set_link_up,
454         .dev_set_link_down            = i40e_dev_set_link_down,
455         .link_update                  = i40e_dev_link_update,
456         .stats_get                    = i40e_dev_stats_get,
457         .xstats_get                   = i40e_dev_xstats_get,
458         .xstats_get_names             = i40e_dev_xstats_get_names,
459         .stats_reset                  = i40e_dev_stats_reset,
460         .xstats_reset                 = i40e_dev_stats_reset,
461         .fw_version_get               = i40e_fw_version_get,
462         .dev_infos_get                = i40e_dev_info_get,
463         .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
464         .vlan_filter_set              = i40e_vlan_filter_set,
465         .vlan_tpid_set                = i40e_vlan_tpid_set,
466         .vlan_offload_set             = i40e_vlan_offload_set,
467         .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
468         .vlan_pvid_set                = i40e_vlan_pvid_set,
469         .rx_queue_start               = i40e_dev_rx_queue_start,
470         .rx_queue_stop                = i40e_dev_rx_queue_stop,
471         .tx_queue_start               = i40e_dev_tx_queue_start,
472         .tx_queue_stop                = i40e_dev_tx_queue_stop,
473         .rx_queue_setup               = i40e_dev_rx_queue_setup,
474         .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
475         .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
476         .rx_queue_release             = i40e_dev_rx_queue_release,
477         .tx_queue_setup               = i40e_dev_tx_queue_setup,
478         .tx_queue_release             = i40e_dev_tx_queue_release,
479         .dev_led_on                   = i40e_dev_led_on,
480         .dev_led_off                  = i40e_dev_led_off,
481         .flow_ctrl_get                = i40e_flow_ctrl_get,
482         .flow_ctrl_set                = i40e_flow_ctrl_set,
483         .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
484         .mac_addr_add                 = i40e_macaddr_add,
485         .mac_addr_remove              = i40e_macaddr_remove,
486         .reta_update                  = i40e_dev_rss_reta_update,
487         .reta_query                   = i40e_dev_rss_reta_query,
488         .rss_hash_update              = i40e_dev_rss_hash_update,
489         .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
490         .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
491         .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
492         .filter_ctrl                  = i40e_dev_filter_ctrl,
493         .rxq_info_get                 = i40e_rxq_info_get,
494         .txq_info_get                 = i40e_txq_info_get,
495         .rx_burst_mode_get            = i40e_rx_burst_mode_get,
496         .tx_burst_mode_get            = i40e_tx_burst_mode_get,
497         .mirror_rule_set              = i40e_mirror_rule_set,
498         .mirror_rule_reset            = i40e_mirror_rule_reset,
499         .timesync_enable              = i40e_timesync_enable,
500         .timesync_disable             = i40e_timesync_disable,
501         .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
502         .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
503         .get_dcb_info                 = i40e_dev_get_dcb_info,
504         .timesync_adjust_time         = i40e_timesync_adjust_time,
505         .timesync_read_time           = i40e_timesync_read_time,
506         .timesync_write_time          = i40e_timesync_write_time,
507         .get_reg                      = i40e_get_regs,
508         .get_eeprom_length            = i40e_get_eeprom_length,
509         .get_eeprom                   = i40e_get_eeprom,
510         .get_module_info              = i40e_get_module_info,
511         .get_module_eeprom            = i40e_get_module_eeprom,
512         .mac_addr_set                 = i40e_set_default_mac_addr,
513         .mtu_set                      = i40e_dev_mtu_set,
514         .tm_ops_get                   = i40e_tm_ops_get,
515         .tx_done_cleanup              = i40e_tx_done_cleanup,
516 };
517
518 /* Store statistics names and their offsets in the stats structure */
519 struct rte_i40e_xstats_name_off {
520         char name[RTE_ETH_XSTATS_NAME_SIZE];
521         unsigned offset;
522 };
523
524 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
525         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
526         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
527         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
528         {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
529         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
530                 rx_unknown_protocol)},
531         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
532         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
533         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
534         {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
535 };
536
537 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
538                 sizeof(rte_i40e_stats_strings[0]))
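/*
 * Editor's note: a minimal sketch of how a name/offset table like the
 * one above is consumed when filling xstats; each value is read from
 * the stats structure at the recorded byte offset. Illustrative
 * helper, not the driver's own code.
 */
static inline uint64_t
i40e_sketch_read_eth_xstat(const struct i40e_eth_stats *stats,
			   unsigned int idx)
{
	return *(const uint64_t *)(((const char *)stats) +
				   rte_i40e_stats_strings[idx].offset);
}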
539
540 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
541         {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
542                 tx_dropped_link_down)},
543         {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
544         {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
545                 illegal_bytes)},
546         {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
547         {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
548                 mac_local_faults)},
549         {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
550                 mac_remote_faults)},
551         {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
552                 rx_length_errors)},
553         {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
554         {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
555         {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
556         {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
557         {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
558         {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
559                 rx_size_127)},
560         {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
561                 rx_size_255)},
562         {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
563                 rx_size_511)},
564         {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
565                 rx_size_1023)},
566         {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
567                 rx_size_1522)},
568         {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
569                 rx_size_big)},
570         {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
571                 rx_undersize)},
572         {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
573                 rx_oversize)},
574         {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
575                 mac_short_packet_dropped)},
576         {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
577                 rx_fragments)},
578         {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
579         {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
580         {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
581                 tx_size_127)},
582         {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
583                 tx_size_255)},
584         {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
585                 tx_size_511)},
586         {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
587                 tx_size_1023)},
588         {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
589                 tx_size_1522)},
590         {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
591                 tx_size_big)},
592         {"rx_flow_director_atr_match_packets",
593                 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
594         {"rx_flow_director_sb_match_packets",
595                 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
596         {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
597                 tx_lpi_status)},
598         {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
599                 rx_lpi_status)},
600         {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
601                 tx_lpi_count)},
602         {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
603                 rx_lpi_count)},
604 };
605
606 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
607                 sizeof(rte_i40e_hw_port_strings[0]))
608
609 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
610         {"xon_packets", offsetof(struct i40e_hw_port_stats,
611                 priority_xon_rx)},
612         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
613                 priority_xoff_rx)},
614 };
615
616 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
617                 sizeof(rte_i40e_rxq_prio_strings[0]))
618
619 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
620         {"xon_packets", offsetof(struct i40e_hw_port_stats,
621                 priority_xon_tx)},
622         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
623                 priority_xoff_tx)},
624         {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
625                 priority_xon_2_xoff)},
626 };
627
628 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
629                 sizeof(rte_i40e_txq_prio_strings[0]))
630
631 static int
632 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
633         struct rte_pci_device *pci_dev)
634 {
635         char name[RTE_ETH_NAME_MAX_LEN];
636         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
637         int i, retval;
638
639         if (pci_dev->device.devargs) {
640                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
641                                 &eth_da);
642                 if (retval)
643                         return retval;
644         }
645
646         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
647                 sizeof(struct i40e_adapter),
648                 eth_dev_pci_specific_init, pci_dev,
649                 eth_i40e_dev_init, NULL);
650
651         if (retval || eth_da.nb_representor_ports < 1)
652                 return retval;
653
654         /* probe VF representor ports */
655         struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
656                 pci_dev->device.name);
657
658         if (pf_ethdev == NULL)
659                 return -ENODEV;
660
661         for (i = 0; i < eth_da.nb_representor_ports; i++) {
662                 struct i40e_vf_representor representor = {
663                         .vf_id = eth_da.representor_ports[i],
664                         .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
665                                 pf_ethdev->data->dev_private)->switch_domain_id,
666                         .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
667                                 pf_ethdev->data->dev_private)
668                 };
669
670                 /* representor port name: net_<pci bdf>_representor_<vf id> */
671                 snprintf(name, sizeof(name), "net_%s_representor_%d",
672                         pci_dev->device.name, eth_da.representor_ports[i]);
673
674                 retval = rte_eth_dev_create(&pci_dev->device, name,
675                         sizeof(struct i40e_vf_representor), NULL, NULL,
676                         i40e_vf_representor_init, &representor);
677
678                 if (retval)
679                         PMD_DRV_LOG(ERR, "failed to create i40e vf "
680                                 "representor %s.", name);
681         }
682
683         return 0;
684 }
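/*
 * Editor's note: representor ports are requested through devargs. For
 * example (assuming the standard "representor" key understood by
 * rte_eth_devargs_parse()):
 *
 *     -a 0000:01:00.0,representor=[0-3]
 *
 * makes the probe above create net_0000:01:00.0_representor_0 through
 * _3 on top of the PF port, following the snprintf() pattern used in
 * the loop.
 */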
685
686 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
687 {
688         struct rte_eth_dev *ethdev;
689
690         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
691         if (!ethdev)
692                 return 0;
693
694         if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
695                 return rte_eth_dev_pci_generic_remove(pci_dev,
696                                         i40e_vf_representor_uninit);
697         else
698                 return rte_eth_dev_pci_generic_remove(pci_dev,
699                                                 eth_i40e_dev_uninit);
700 }
701
702 static struct rte_pci_driver rte_i40e_pmd = {
703         .id_table = pci_id_i40e_map,
704         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
705         .probe = eth_i40e_pci_probe,
706         .remove = eth_i40e_pci_remove,
707 };
708
709 static inline void
710 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
711                          uint32_t reg_val)
712 {
713         uint32_t ori_reg_val;
714         struct rte_eth_dev *dev;
715
716         ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
717         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
718         i40e_write_rx_ctl(hw, reg_addr, reg_val);
719         if (ori_reg_val != reg_val)
720                 PMD_DRV_LOG(WARNING,
721                             "i40e device %s changed global register [0x%08x]."
722                             " original: 0x%08x, new: 0x%08x",
723                             dev->device->name, reg_addr, ori_reg_val, reg_val);
724 }
725
726 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
727 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
728 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
729
730 #ifndef I40E_GLQF_ORT
731 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
732 #endif
733 #ifndef I40E_GLQF_PIT
734 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
735 #endif
736 #ifndef I40E_GLQF_L3_MAP
737 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
738 #endif
739
740 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
741 {
742         /*
743          * Initialize registers for parsing the QinQ packet type.
744          * This should be removed once a proper configuration API
745          * is added, to avoid configuration conflicts between ports
746          * of the same device.
747          */
748         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
749         I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
750 }
751
752 static inline void i40e_config_automask(struct i40e_pf *pf)
753 {
754         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
755         uint32_t val;
756
757         /* INTENA flag is not auto-cleared for interrupt */
758         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
759         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
760                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
761
762         /* If support multi-driver, PF will use INT0. */
763         if (!pf->support_multi_driver)
764                 val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
765
766         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
767 }
768
769 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
770
771 /*
772  * Add an ethertype filter to drop all flow control frames transmitted
773  * from VSIs.
774  */
775 static void
776 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
777 {
778         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
779         uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
780                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
781                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
782         int ret;
783
784         ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
785                                 I40E_FLOW_CONTROL_ETHERTYPE, flags,
786                                 pf->main_vsi_seid, 0,
787                                 TRUE, NULL, NULL);
788         if (ret)
789                 PMD_INIT_LOG(ERR,
790                         "Failed to add filter to drop flow control frames from VSIs.");
791 }
792
793 static int
794 floating_veb_list_handler(__rte_unused const char *key,
795                           const char *floating_veb_value,
796                           void *opaque)
797 {
798         int idx = 0;
799         unsigned int count = 0;
800         char *end = NULL;
801         int min, max;
802         bool *vf_floating_veb = opaque;
803
804         while (isblank(*floating_veb_value))
805                 floating_veb_value++;
806
807         /* Reset floating VEB configuration for VFs */
808         for (idx = 0; idx < I40E_MAX_VF; idx++)
809                 vf_floating_veb[idx] = false;
810
811         min = I40E_MAX_VF;
812         do {
813                 while (isblank(*floating_veb_value))
814                         floating_veb_value++;
815                 if (*floating_veb_value == '\0')
816                         return -1;
817                 errno = 0;
818                 idx = strtoul(floating_veb_value, &end, 10);
819                 if (errno || end == NULL)
820                         return -1;
821                 while (isblank(*end))
822                         end++;
823                 if (*end == '-') {
824                         min = idx;
825                 } else if ((*end == ';') || (*end == '\0')) {
826                         max = idx;
827                         if (min == I40E_MAX_VF)
828                                 min = idx;
829                         if (max >= I40E_MAX_VF)
830                                 max = I40E_MAX_VF - 1;
831                         for (idx = min; idx <= max; idx++) {
832                                 vf_floating_veb[idx] = true;
833                                 count++;
834                         }
835                         min = I40E_MAX_VF;
836                 } else {
837                         return -1;
838                 }
839                 floating_veb_value = end + 1;
840         } while (*end != '\0');
841
842         if (count == 0)
843                 return -1;
844
845         return 0;
846 }
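/*
 * Editor's note: the handler above accepts semicolon-separated VF
 * indices and '-' ranges. A hypothetical devargs example:
 *
 *     -a 0000:01:00.0,enable_floating_veb=1,floating_veb_list=0;3-5;7
 *
 * marks VFs 0, 3, 4, 5 and 7 for the floating VEB and leaves the
 * remaining VFs on the legacy VEB.
 */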
847
848 static void
849 config_vf_floating_veb(struct rte_devargs *devargs,
850                        uint16_t floating_veb,
851                        bool *vf_floating_veb)
852 {
853         struct rte_kvargs *kvlist;
854         int i;
855         const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
856
857         if (!floating_veb)
858                 return;
859         /* All the VFs attach to the floating VEB by default
860          * when the floating VEB is enabled.
861          */
862         for (i = 0; i < I40E_MAX_VF; i++)
863                 vf_floating_veb[i] = true;
864
865         if (devargs == NULL)
866                 return;
867
868         kvlist = rte_kvargs_parse(devargs->args, valid_keys);
869         if (kvlist == NULL)
870                 return;
871
872         if (!rte_kvargs_count(kvlist, floating_veb_list)) {
873                 rte_kvargs_free(kvlist);
874                 return;
875         }
876         /* When the floating_veb_list parameter exists, all the VFs
877          * attach to the legacy VEB first and are then assigned to
878          * the floating VEB according to the floating_veb_list.
879          */
880         if (rte_kvargs_process(kvlist, floating_veb_list,
881                                floating_veb_list_handler,
882                                vf_floating_veb) < 0) {
883                 rte_kvargs_free(kvlist);
884                 return;
885         }
886         rte_kvargs_free(kvlist);
887 }
888
889 static int
890 i40e_check_floating_handler(__rte_unused const char *key,
891                             const char *value,
892                             __rte_unused void *opaque)
893 {
894         if (strcmp(value, "1"))
895                 return -1;
896
897         return 0;
898 }
899
900 static int
901 is_floating_veb_supported(struct rte_devargs *devargs)
902 {
903         struct rte_kvargs *kvlist;
904         const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
905
906         if (devargs == NULL)
907                 return 0;
908
909         kvlist = rte_kvargs_parse(devargs->args, valid_keys);
910         if (kvlist == NULL)
911                 return 0;
912
913         if (!rte_kvargs_count(kvlist, floating_veb_key)) {
914                 rte_kvargs_free(kvlist);
915                 return 0;
916         }
917         /* Floating VEB is enabled when there's key-value:
918          * enable_floating_veb=1
919          */
920         if (rte_kvargs_process(kvlist, floating_veb_key,
921                                i40e_check_floating_handler, NULL) < 0) {
922                 rte_kvargs_free(kvlist);
923                 return 0;
924         }
925         rte_kvargs_free(kvlist);
926
927         return 1;
928 }
929
930 static void
931 config_floating_veb(struct rte_eth_dev *dev)
932 {
933         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
934         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
935         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
936
937         memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
938
939         if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
940                 pf->floating_veb =
941                         is_floating_veb_supported(pci_dev->device.devargs);
942                 config_vf_floating_veb(pci_dev->device.devargs,
943                                        pf->floating_veb,
944                                        pf->floating_veb_list);
945         } else {
946                 pf->floating_veb = false;
947         }
948 }
949
950 #define I40E_L2_TAGS_S_TAG_SHIFT 1
951 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
952
953 static int
954 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
955 {
956         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
957         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
958         char ethertype_hash_name[RTE_HASH_NAMESIZE];
959         int ret;
960
961         struct rte_hash_parameters ethertype_hash_params = {
962                 .name = ethertype_hash_name,
963                 .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
964                 .key_len = sizeof(struct i40e_ethertype_filter_input),
965                 .hash_func = rte_hash_crc,
966                 .hash_func_init_val = 0,
967                 .socket_id = rte_socket_id(),
968         };
969
970         /* Initialize ethertype filter rule list and hash */
971         TAILQ_INIT(&ethertype_rule->ethertype_list);
972         snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
973                  "ethertype_%s", dev->device->name);
974         ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
975         if (!ethertype_rule->hash_table) {
976                 PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
977                 return -EINVAL;
978         }
979         ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
980                                        sizeof(struct i40e_ethertype_filter *) *
981                                        I40E_MAX_ETHERTYPE_FILTER_NUM,
982                                        0);
983         if (!ethertype_rule->hash_map) {
984                 PMD_INIT_LOG(ERR,
985                              "Failed to allocate memory for ethertype hash map!");
986                 ret = -ENOMEM;
987                 goto err_ethertype_hash_map_alloc;
988         }
989
990         return 0;
991
992 err_ethertype_hash_map_alloc:
993         rte_hash_free(ethertype_rule->hash_table);
994
995         return ret;
996 }
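/*
 * Editor's note: a minimal sketch of the insert pattern the table and
 * map above are built for; the key position returned by
 * rte_hash_add_key() indexes hash_map, which points back at the filter
 * (assuming the TAILQ entry field is named "rules" as declared in
 * i40e_ethdev.h). The driver's real helper appears later in this file.
 */
static inline int
i40e_sketch_ethertype_insert(struct i40e_ethertype_rule *rule,
			     struct i40e_ethertype_filter *filter)
{
	int pos = rte_hash_add_key(rule->hash_table, &filter->input);

	if (pos < 0)
		return pos;	/* negative errno from rte_hash */
	rule->hash_map[pos] = filter;
	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
	return 0;
}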
997
998 static int
999 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
1000 {
1001         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1002         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1003         char tunnel_hash_name[RTE_HASH_NAMESIZE];
1004         int ret;
1005
1006         struct rte_hash_parameters tunnel_hash_params = {
1007                 .name = tunnel_hash_name,
1008                 .entries = I40E_MAX_TUNNEL_FILTER_NUM,
1009                 .key_len = sizeof(struct i40e_tunnel_filter_input),
1010                 .hash_func = rte_hash_crc,
1011                 .hash_func_init_val = 0,
1012                 .socket_id = rte_socket_id(),
1013         };
1014
1015         /* Initialize tunnel filter rule list and hash */
1016         TAILQ_INIT(&tunnel_rule->tunnel_list);
1017         snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
1018                  "tunnel_%s", dev->device->name);
1019         tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
1020         if (!tunnel_rule->hash_table) {
1021                 PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
1022                 return -EINVAL;
1023         }
1024         tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
1025                                     sizeof(struct i40e_tunnel_filter *) *
1026                                     I40E_MAX_TUNNEL_FILTER_NUM,
1027                                     0);
1028         if (!tunnel_rule->hash_map) {
1029                 PMD_INIT_LOG(ERR,
1030                              "Failed to allocate memory for tunnel hash map!");
1031                 ret = -ENOMEM;
1032                 goto err_tunnel_hash_map_alloc;
1033         }
1034
1035         return 0;
1036
1037 err_tunnel_hash_map_alloc:
1038         rte_hash_free(tunnel_rule->hash_table);
1039
1040         return ret;
1041 }
1042
1043 static int
1044 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
1045 {
1046         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1047         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1048         struct i40e_fdir_info *fdir_info = &pf->fdir;
1049         char fdir_hash_name[RTE_HASH_NAMESIZE];
1050         uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
1051         uint32_t best = hw->func_caps.fd_filters_best_effort;
1052         struct rte_bitmap *bmp = NULL;
1053         uint32_t bmp_size;
1054         void *mem = NULL;
1055         uint32_t i = 0;
1056         int ret;
1057
1058         struct rte_hash_parameters fdir_hash_params = {
1059                 .name = fdir_hash_name,
1060                 .entries = I40E_MAX_FDIR_FILTER_NUM,
1061                 .key_len = sizeof(struct i40e_fdir_input),
1062                 .hash_func = rte_hash_crc,
1063                 .hash_func_init_val = 0,
1064                 .socket_id = rte_socket_id(),
1065         };
1066
1067         /* Initialize flow director filter rule list and hash */
1068         TAILQ_INIT(&fdir_info->fdir_list);
1069         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1070                  "fdir_%s", dev->device->name);
1071         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1072         if (!fdir_info->hash_table) {
1073                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1074                 return -EINVAL;
1075         }
1076
1077         fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1078                                           sizeof(struct i40e_fdir_filter *) *
1079                                           I40E_MAX_FDIR_FILTER_NUM,
1080                                           0);
1081         if (!fdir_info->hash_map) {
1082                 PMD_INIT_LOG(ERR,
1083                              "Failed to allocate memory for fdir hash map!");
1084                 ret = -ENOMEM;
1085                 goto err_fdir_hash_map_alloc;
1086         }
1087
1088         fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
1089                         sizeof(struct i40e_fdir_filter) *
1090                         I40E_MAX_FDIR_FILTER_NUM,
1091                         0);
1092
1093         if (!fdir_info->fdir_filter_array) {
1094                 PMD_INIT_LOG(ERR,
1095                              "Failed to allocate memory for fdir filter array!");
1096                 ret = -ENOMEM;
1097                 goto err_fdir_filter_array_alloc;
1098         }
1099
1100         fdir_info->fdir_space_size = alloc + best;
1101         fdir_info->fdir_actual_cnt = 0;
1102         fdir_info->fdir_guarantee_total_space = alloc;
1103         fdir_info->fdir_guarantee_free_space =
1104                 fdir_info->fdir_guarantee_total_space;
1105
1106         PMD_DRV_LOG(INFO, "FDIR guaranteed space: %u, best-effort space: %u.", alloc, best);
1107
1108         fdir_info->fdir_flow_pool.pool =
1109                         rte_zmalloc("i40e_fdir_entry",
1110                                 sizeof(struct i40e_fdir_entry) *
1111                                 fdir_info->fdir_space_size,
1112                                 0);
1113
1114         if (!fdir_info->fdir_flow_pool.pool) {
1115                 PMD_INIT_LOG(ERR,
1116                              "Failed to allocate memory for fdir flow pool!");
1117                 ret = -ENOMEM;
1118                 goto err_fdir_bitmap_flow_alloc;
1119         }
1120
1121         for (i = 0; i < fdir_info->fdir_space_size; i++)
1122                 fdir_info->fdir_flow_pool.pool[i].idx = i;
1123
1124         bmp_size =
1125                 rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
1126         mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
1127         if (mem == NULL) {
1128                 PMD_INIT_LOG(ERR,
1129                              "Failed to allocate memory for fdir bitmap!");
1130                 ret = -ENOMEM;
1131                 goto err_fdir_mem_alloc;
1132         }
1133         bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
1134         if (bmp == NULL) {
1135                 PMD_INIT_LOG(ERR,
1136                              "Failed to initialize fdir bitmap!");
1137                 ret = -ENOMEM;
1138                 goto err_fdir_bmp_alloc;
1139         }
1140         for (i = 0; i < fdir_info->fdir_space_size; i++)
1141                 rte_bitmap_set(bmp, i);
1142
1143         fdir_info->fdir_flow_pool.bitmap = bmp;
1144
1145         return 0;
1146
1147 err_fdir_bmp_alloc:
1148         rte_free(mem);
1149 err_fdir_mem_alloc:
1150         rte_free(fdir_info->fdir_flow_pool.pool);
1151 err_fdir_bitmap_flow_alloc:
1152         rte_free(fdir_info->fdir_filter_array);
1153 err_fdir_filter_array_alloc:
1154         rte_free(fdir_info->hash_map);
1155 err_fdir_hash_map_alloc:
1156         rte_hash_free(fdir_info->hash_table);
1157
1158         return ret;
1159 }
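/*
 * Editor's note: a minimal sketch of how a free flow entry can be
 * taken from the bitmap pool initialized above (a set bit marks a
 * free slot). The driver's real pool helpers appear later in this
 * file; this version only illustrates the rte_bitmap usage.
 */
static inline struct i40e_fdir_entry *
i40e_sketch_fdir_entry_get(struct i40e_fdir_info *fdir_info)
{
	struct rte_bitmap *bmp = fdir_info->fdir_flow_pool.bitmap;
	uint32_t pos = 0;
	uint64_t slab = 0;

	/* Find a 64-bit slab containing at least one free slot */
	if (!rte_bitmap_scan(bmp, &pos, &slab))
		return NULL;

	/* Locate the first free slot in the slab and claim it */
	pos += rte_bsf64(slab);
	rte_bitmap_clear(bmp, pos);
	return &fdir_info->fdir_flow_pool.pool[pos];
}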
1160
1161 static void
1162 i40e_init_customized_info(struct i40e_pf *pf)
1163 {
1164         int i;
1165
1166         /* Initialize customized pctype */
1167         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1168                 pf->customized_pctype[i].index = i;
1169                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1170                 pf->customized_pctype[i].valid = false;
1171         }
1172
1173         pf->gtp_support = false;
1174         pf->esp_support = false;
1175 }
1176
1177 static void
1178 i40e_init_filter_invalidation(struct i40e_pf *pf)
1179 {
1180         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1181         struct i40e_fdir_info *fdir_info = &pf->fdir;
1182         uint32_t glqf_ctl_reg = 0;
1183
1184         glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1185         if (!pf->support_multi_driver) {
1186                 fdir_info->fdir_invalprio = 1;
1187                 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1188                 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1189                 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1190         } else {
1191                 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1192                         fdir_info->fdir_invalprio = 1;
1193                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1194                 } else {
1195                         fdir_info->fdir_invalprio = 0;
1196                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1197                 }
1198         }
1199 }
1200
1201 void
1202 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1203 {
1204         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1206         struct i40e_queue_regions *info = &pf->queue_region;
1207         uint16_t i;
1208
1209         for (i = 0; i <= I40E_PFQF_HREGION_MAX_INDEX; i++)
1210                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1211
1212         memset(info, 0, sizeof(struct i40e_queue_regions));
1213 }
1214
1215 static int
1216 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1217                                const char *value,
1218                                void *opaque)
1219 {
1220         struct i40e_pf *pf;
1221         unsigned long support_multi_driver;
1222         char *end;
1223
1224         pf = (struct i40e_pf *)opaque;
1225
1226         errno = 0;
1227         support_multi_driver = strtoul(value, &end, 10);
1228         if (errno != 0 || end == value || *end != 0) {
1229                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1230                 return -(EINVAL);
1231         }
1232
1233         if (support_multi_driver == 1 || support_multi_driver == 0)
1234                 pf->support_multi_driver = (bool)support_multi_driver;
1235         else
1236                 PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
1237                             "enable global configuration by default.",
1238                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1239         return 0;
1240 }
1241
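/*
 * Parse the "support-multi-driver" devarg, e.g. with EAL allow-list
 * syntax (the PCI address below is only illustrative):
 *     -a 0000:02:00.0,support-multi-driver=1
 * When enabled, the PMD avoids modifying global registers that may be
 * shared with other drivers bound to the same device.
 */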
1242 static int
1243 i40e_support_multi_driver(struct rte_eth_dev *dev)
1244 {
1245         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1246         struct rte_kvargs *kvlist;
1247         int kvargs_count;
1248
1249         /* Enable global configuration by default */
1250         pf->support_multi_driver = false;
1251
1252         if (!dev->device->devargs)
1253                 return 0;
1254
1255         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1256         if (!kvlist)
1257                 return -EINVAL;
1258
1259         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1260         if (!kvargs_count) {
1261                 rte_kvargs_free(kvlist);
1262                 return 0;
1263         }
1264
1265         if (kvargs_count > 1)
1266                 PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
1267                             "the first invalid or the last valid one is used!",
1268                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1269
1270         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1271                                i40e_parse_multi_drv_handler, pf) < 0) {
1272                 rte_kvargs_free(kvlist);
1273                 return -EINVAL;
1274         }
1275
1276         rte_kvargs_free(kvlist);
1277         return 0;
1278 }
1279
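/*
 * Debug-write a global (device-wide) register through the admin queue.
 * The register is read first so that a warning can be logged whenever
 * the value actually changes, since global registers affect all
 * functions of the device.
 */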
1280 static int
1281 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1282                                     uint32_t reg_addr, uint64_t reg_val,
1283                                     struct i40e_asq_cmd_details *cmd_details)
1284 {
1285         uint64_t ori_reg_val;
1286         struct rte_eth_dev *dev;
1287         int ret;
1288
1289         ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1290         if (ret != I40E_SUCCESS) {
1291                 PMD_DRV_LOG(ERR,
1292                             "Failed to debug-read register 0x%08x",
1293                             reg_addr);
1294                 return -EIO;
1295         }
1296         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1297
1298         if (ori_reg_val != reg_val)
1299                 PMD_DRV_LOG(WARNING,
1300                             "i40e device %s changed global register [0x%08x]."
1301                             " original: 0x%"PRIx64", after: 0x%"PRIx64,
1302                             dev->device->name, reg_addr, ori_reg_val, reg_val);
1303
1304         return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1305 }
1306
1307 static int
1308 i40e_parse_latest_vec_handler(__rte_unused const char *key,
1309                                 const char *value,
1310                                 void *opaque)
1311 {
1312         struct i40e_adapter *ad = opaque;
1313         int use_latest_vec;
1314
1315         use_latest_vec = atoi(value);
1316
1317         if (use_latest_vec != 0 && use_latest_vec != 1)
1318         if (use_latest_vec != 0 && use_latest_vec != 1) {
1319                 PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
1320                 use_latest_vec = 1;
1321         }
1322         ad->use_latest_vec = (uint8_t)use_latest_vec;
1323 }
1324
1325 static int
1326 i40e_use_latest_vec(struct rte_eth_dev *dev)
1327 {
1328         struct i40e_adapter *ad =
1329                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1330         struct rte_kvargs *kvlist;
1331         int kvargs_count;
1332
1333         ad->use_latest_vec = false;
1334
1335         if (!dev->device->devargs)
1336                 return 0;
1337
1338         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1339         if (!kvlist)
1340                 return -EINVAL;
1341
1342         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
1343         if (!kvargs_count) {
1344                 rte_kvargs_free(kvlist);
1345                 return 0;
1346         }
1347
1348         if (kvargs_count > 1)
1349                 PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
1350                             "the first invalid or the last valid one is used!",
1351                             ETH_I40E_USE_LATEST_VEC);
1352
1353         if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
1354                                 i40e_parse_latest_vec_handler, ad) < 0) {
1355                 rte_kvargs_free(kvlist);
1356                 return -EINVAL;
1357         }
1358
1359         rte_kvargs_free(kvlist);
1360         return 0;
1361 }
1362
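/*
 * Parse the "vf_msg_cfg" devarg, formatted as max_msg@period:ignore_second,
 * e.g. vf_msg_cfg=60@120:180 (the example the error log below suggests):
 * presumably at most 60 VF messages per 120-second period, with offending
 * VFs ignored for 180 seconds.
 */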
1363 static int
1364 read_vf_msg_config(__rte_unused const char *key,
1365                                const char *value,
1366                                void *opaque)
1367 {
1368         struct i40e_vf_msg_cfg *cfg = opaque;
1369
1370         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1371                         &cfg->ignore_second) != 3) {
1372                 memset(cfg, 0, sizeof(*cfg));
1373                 PMD_DRV_LOG(ERR, "Format error! Example: "
1374                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1375                 return -EINVAL;
1376         }
1377
1378         /*
1379          * If the message validation function has been enabled, both
1380          * 'period' and 'ignore_second' must be greater than 0.
1381          */
1382         if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1383                 memset(cfg, 0, sizeof(*cfg));
1384                 PMD_DRV_LOG(ERR, "%s error! The second and third"
1385                                 " numbers must be greater than 0!",
1386                                 ETH_I40E_VF_MSG_CFG);
1387                 return -EINVAL;
1388         }
1389
1390         return 0;
1391 }
1392
1393 static int
1394 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1395                 struct i40e_vf_msg_cfg *msg_cfg)
1396 {
1397         struct rte_kvargs *kvlist;
1398         int kvargs_count;
1399         int ret = 0;
1400
1401         memset(msg_cfg, 0, sizeof(*msg_cfg));
1402
1403         if (!dev->device->devargs)
1404                 return ret;
1405
1406         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1407         if (!kvlist)
1408                 return -EINVAL;
1409
1410         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1411         if (!kvargs_count)
1412                 goto free_end;
1413
1414         if (kvargs_count > 1) {
1415                 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1416                                 ETH_I40E_VF_MSG_CFG);
1417                 ret = -EINVAL;
1418                 goto free_end;
1419         }
1420
1421         if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1422                         read_vf_msg_config, msg_cfg) < 0)
1423                 ret = -EINVAL;
1424
1425 free_end:
1426         rte_kvargs_free(kvlist);
1427         return ret;
1428 }
1429
1430 #define I40E_ALARM_INTERVAL 50000 /* us */
1431
1432 static int
1433 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1434 {
1435         struct rte_pci_device *pci_dev;
1436         struct rte_intr_handle *intr_handle;
1437         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1439         struct i40e_vsi *vsi;
1440         int ret;
1441         uint32_t len, val;
1442         uint8_t aq_fail = 0;
1443
1444         PMD_INIT_FUNC_TRACE();
1445
1446         dev->dev_ops = &i40e_eth_dev_ops;
1447         dev->rx_queue_count = i40e_dev_rx_queue_count;
1448         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1449         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1450         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1451         dev->rx_pkt_burst = i40e_recv_pkts;
1452         dev->tx_pkt_burst = i40e_xmit_pkts;
1453         dev->tx_pkt_prepare = i40e_prep_pkts;
1454
1455         /* For secondary processes, we don't initialise any further as the
1456          * primary has already done this work. Only check that we don't need
1457          * a different RX function */
1458         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1459                 i40e_set_rx_function(dev);
1460                 i40e_set_tx_function(dev);
1461                 return 0;
1462         }
1463         i40e_set_default_ptype_table(dev);
1464         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1465         intr_handle = &pci_dev->intr_handle;
1466
1467         rte_eth_copy_pci_info(dev, pci_dev);
1468         dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1469
1470         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1471         pf->adapter->eth_dev = dev;
1472         pf->dev_data = dev->data;
1473
1474         hw->back = I40E_PF_TO_ADAPTER(pf);
1475         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1476         if (!hw->hw_addr) {
1477                 PMD_INIT_LOG(ERR,
1478                         "Hardware is not available, as address is NULL");
1479                 return -ENODEV;
1480         }
1481
1482         hw->vendor_id = pci_dev->id.vendor_id;
1483         hw->device_id = pci_dev->id.device_id;
1484         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1485         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1486         hw->bus.device = pci_dev->addr.devid;
1487         hw->bus.func = pci_dev->addr.function;
1488         hw->adapter_stopped = 0;
1489         hw->adapter_closed = 0;
1490
1491         /* Init switch device pointer */
1492         hw->switch_dev = NULL;
1493
1494         /*
1495          * Switch Tag value should not be identical to either the First Tag
1496          * or Second Tag values. So set something other than common Ethertype
1497          * for internal switching.
1498          */
1499         hw->switch_tag = 0xffff;
1500
1501         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1502         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1503                 PMD_INIT_LOG(ERR, "\nERROR: "
1504                         "Firmware recovery mode detected. Limiting functionality.\n"
1505                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1506                         "User Guide for details on firmware recovery mode.");
1507                 return -EIO;
1508         }
1509
1510         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1511         /* Check if need to support multi-driver */
1512         i40e_support_multi_driver(dev);
1513         /* Check if users want the latest supported vec path */
1514         i40e_use_latest_vec(dev);
1515
1516         /* Make sure all is clean before doing PF reset */
1517         i40e_clear_hw(hw);
1518
1519         /* Reset here to make sure all is clean for each PF */
1520         ret = i40e_pf_reset(hw);
1521         if (ret) {
1522                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1523                 return ret;
1524         }
1525
1526         /* Initialize the shared code (base driver) */
1527         ret = i40e_init_shared_code(hw);
1528         if (ret) {
1529                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1530                 return ret;
1531         }
1532
1533         /* Initialize the parameters for adminq */
1534         i40e_init_adminq_parameter(hw);
1535         ret = i40e_init_adminq(hw);
1536         if (ret != I40E_SUCCESS) {
1537                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1538                 return -EIO;
1539         }
1540         /* Firmware of SFP x722 does not support adminq option */
1541         if (hw->device_id == I40E_DEV_ID_SFP_X722)
1542                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1543
1544         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1545                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1546                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1547                      ((hw->nvm.version >> 12) & 0xf),
1548                      ((hw->nvm.version >> 4) & 0xff),
1549                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1550
1551         /* Initialize the hardware */
1552         i40e_hw_init(dev);
1553
1554         i40e_config_automask(pf);
1555
1556         i40e_set_default_pctype_table(dev);
1557
1558         /*
1559          * To work around the NVM issue, initialize registers
1560          * for packet type of QinQ by software.
1561          * It should be removed once issues are fixed in NVM.
1562          */
1563         if (!pf->support_multi_driver)
1564                 i40e_GLQF_reg_init(hw);
1565
1566         /* Initialize the input set for filters (hash and fd) to default value */
1567         i40e_filter_input_set_init(pf);
1568
1569         /* initialise the L3_MAP register */
1570         if (!pf->support_multi_driver) {
1571                 ret = i40e_aq_debug_write_global_register(hw,
1572                                                    I40E_GLQF_L3_MAP(40),
1573                                                    0x00000028,  NULL);
1574                 if (ret)
1575                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1576                                      ret);
1577                 PMD_INIT_LOG(DEBUG,
1578                              "Global register 0x%08x is set to 0x28",
1579                              I40E_GLQF_L3_MAP(40));
1580         }
1581
1582         /* Need the special FW version to support floating VEB */
1583         config_floating_veb(dev);
1584         /* Clear PXE mode */
1585         i40e_clear_pxe_mode(hw);
1586         i40e_dev_sync_phy_type(hw);
1587
1588         /*
1589          * On X710, performance number is far from the expectation on recent
1590          * firmware versions. The fix for this issue may not be integrated in
1591          * the following firmware version. So the workaround in software driver
1592          * is needed. It needs to modify the initial values of 3 internal only
1593          * registers. Note that the workaround can be removed when it is fixed
1594          * in firmware in the future.
1595          */
1596         i40e_configure_registers(hw);
1597
1598         /* Get hw capabilities */
1599         ret = i40e_get_cap(hw);
1600         if (ret != I40E_SUCCESS) {
1601                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1602                 goto err_get_capabilities;
1603         }
1604
1605         /* Initialize parameters for PF */
1606         ret = i40e_pf_parameter_init(dev);
1607         if (ret != 0) {
1608                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1609                 goto err_parameter_init;
1610         }
1611
1612         /* Initialize the queue management */
1613         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1614         if (ret < 0) {
1615                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1616                 goto err_qp_pool_init;
1617         }
1618         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1619                                 hw->func_caps.num_msix_vectors - 1);
1620         if (ret < 0) {
1621                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1622                 goto err_msix_pool_init;
1623         }
1624
1625         /* Initialize lan hmc */
1626         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1627                                 hw->func_caps.num_rx_qp, 0, 0);
1628         if (ret != I40E_SUCCESS) {
1629                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1630                 goto err_init_lan_hmc;
1631         }
1632
1633         /* Configure lan hmc */
1634         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1635         if (ret != I40E_SUCCESS) {
1636                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1637                 goto err_configure_lan_hmc;
1638         }
1639
1640         /* Get and check the mac address */
1641         i40e_get_mac_addr(hw, hw->mac.addr);
1642         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1643                 PMD_INIT_LOG(ERR, "MAC address is not valid");
1644                 ret = -EIO;
1645                 goto err_get_mac_addr;
1646         }
1647         /* Copy the permanent MAC address */
1648         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1649                         (struct rte_ether_addr *)hw->mac.perm_addr);
1650
1651         /* Disable flow control */
1652         hw->fc.requested_mode = I40E_FC_NONE;
1653         i40e_set_fc(hw, &aq_fail, TRUE);
1654
1655         /* Set the global registers with default ether type value */
1656         if (!pf->support_multi_driver) {
1657                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1658                                          RTE_ETHER_TYPE_VLAN);
1659                 if (ret != I40E_SUCCESS) {
1660                         PMD_INIT_LOG(ERR,
1661                                      "Failed to set the default outer "
1662                                      "VLAN ether type");
1663                         goto err_setup_pf_switch;
1664                 }
1665         }
1666
1667         /* PF setup, which includes VSI setup */
1668         ret = i40e_pf_setup(pf);
1669         if (ret) {
1670                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1671                 goto err_setup_pf_switch;
1672         }
1673
1674         vsi = pf->main_vsi;
1675
1676         /* Disable double vlan by default */
1677         i40e_vsi_config_double_vlan(vsi, FALSE);
1678
1679         /* Disable S-TAG identification when floating_veb is disabled */
1680         if (!pf->floating_veb) {
1681                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1682                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1683                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1684                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1685                 }
1686         }
1687
1688         if (!vsi->max_macaddrs)
1689                 len = RTE_ETHER_ADDR_LEN;
1690         else
1691                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1692
1693         /* Should be after VSI initialized */
1694         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1695         if (!dev->data->mac_addrs) {
1696                 PMD_INIT_LOG(ERR,
1697                         "Failed to allocate memory for storing MAC address");
1698                 goto err_mac_alloc;
1699         }
1700         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1701                                         &dev->data->mac_addrs[0]);
1702
1703         /* Init dcb to sw mode by default */
1704         ret = i40e_dcb_init_configure(dev, TRUE);
1705         if (ret != I40E_SUCCESS) {
1706                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1707                 pf->flags &= ~I40E_FLAG_DCB;
1708         }
1709         /* Update HW struct after DCB configuration */
1710         i40e_get_cap(hw);
1711
1712         /* initialize pf host driver to setup SRIOV resource if applicable */
1713         i40e_pf_host_init(dev);
1714
1715         /* register callback func to eal lib */
1716         rte_intr_callback_register(intr_handle,
1717                                    i40e_dev_interrupt_handler, dev);
1718
1719         /* configure and enable device interrupt */
1720         i40e_pf_config_irq0(hw, TRUE);
1721         i40e_pf_enable_irq0(hw);
1722
1723         /* enable uio intr after callback register */
1724         rte_intr_enable(intr_handle);
1725
1726         /* By default disable flexible payload in global configuration */
1727         if (!pf->support_multi_driver)
1728                 i40e_flex_payload_reg_set_default(hw);
1729
1730         /*
1731          * Add an ethertype filter to drop all flow control frames transmitted
1732          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1733          * frames to wire.
1734          */
1735         i40e_add_tx_flow_control_drop_filter(pf);
1736
1737         /* Set the max frame size to 0x2600 by default,
1738          * in case other drivers changed the default value.
1739          */
1740         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1741
1742         /* initialize mirror rule list */
1743         TAILQ_INIT(&pf->mirror_list);
1744
1745         /* initialize RSS rule list */
1746         TAILQ_INIT(&pf->rss_config_list);
1747
1748         /* initialize Traffic Manager configuration */
1749         i40e_tm_conf_init(dev);
1750
1751         /* Initialize customized information */
1752         i40e_init_customized_info(pf);
1753
1754         /* Initialize the filter invalidation configuration */
1755         i40e_init_filter_invalidation(pf);
1756
1757         ret = i40e_init_ethtype_filter_list(dev);
1758         if (ret < 0)
1759                 goto err_init_ethtype_filter_list;
1760         ret = i40e_init_tunnel_filter_list(dev);
1761         if (ret < 0)
1762                 goto err_init_tunnel_filter_list;
1763         ret = i40e_init_fdir_filter_list(dev);
1764         if (ret < 0)
1765                 goto err_init_fdir_filter_list;
1766
1767         /* initialize queue region configuration */
1768         i40e_init_queue_region_conf(dev);
1769
1770         /* initialize RSS configuration from rte_flow */
1771         memset(&pf->rss_info, 0,
1772                 sizeof(struct i40e_rte_flow_rss_conf));
1773
1774         /* reset all stats of the device, including pf and main vsi */
1775         i40e_dev_stats_reset(dev);
1776
1777         return 0;
1778
1779 err_init_fdir_filter_list:
1780         rte_free(pf->tunnel.hash_table);
1781         rte_free(pf->tunnel.hash_map);
1782 err_init_tunnel_filter_list:
1783         rte_free(pf->ethertype.hash_table);
1784         rte_free(pf->ethertype.hash_map);
1785 err_init_ethtype_filter_list:
1786         rte_free(dev->data->mac_addrs);
1787         dev->data->mac_addrs = NULL;
1788 err_mac_alloc:
1789         i40e_vsi_release(pf->main_vsi);
1790 err_setup_pf_switch:
1791 err_get_mac_addr:
1792 err_configure_lan_hmc:
1793         (void)i40e_shutdown_lan_hmc(hw);
1794 err_init_lan_hmc:
1795         i40e_res_pool_destroy(&pf->msix_pool);
1796 err_msix_pool_init:
1797         i40e_res_pool_destroy(&pf->qp_pool);
1798 err_qp_pool_init:
1799 err_parameter_init:
1800 err_get_capabilities:
1801         (void)i40e_shutdown_adminq(hw);
1802
1803         return ret;
1804 }
1805
1806 static void
1807 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1808 {
1809         struct i40e_ethertype_filter *p_ethertype;
1810         struct i40e_ethertype_rule *ethertype_rule;
1811
1812         ethertype_rule = &pf->ethertype;
1813         /* Remove all ethertype filter rules and hash */
1814         if (ethertype_rule->hash_map)
1815                 rte_free(ethertype_rule->hash_map);
1816         if (ethertype_rule->hash_table)
1817                 rte_hash_free(ethertype_rule->hash_table);
1818
1819         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1820                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1821                              p_ethertype, rules);
1822                 rte_free(p_ethertype);
1823         }
1824 }
1825
1826 static void
1827 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1828 {
1829         struct i40e_tunnel_filter *p_tunnel;
1830         struct i40e_tunnel_rule *tunnel_rule;
1831
1832         tunnel_rule = &pf->tunnel;
1833         /* Remove all tunnel director rules and hash */
1834         if (tunnel_rule->hash_map)
1835                 rte_free(tunnel_rule->hash_map);
1836         if (tunnel_rule->hash_table)
1837                 rte_hash_free(tunnel_rule->hash_table);
1838
1839         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1840                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1841                 rte_free(p_tunnel);
1842         }
1843 }
1844
1845 static void
1846 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1847 {
1848         struct i40e_fdir_filter *p_fdir;
1849         struct i40e_fdir_info *fdir_info;
1850
1851         fdir_info = &pf->fdir;
1852
1853         /* Remove all flow director rules */
1854         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1855                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1856 }
1857
1858 static void
1859 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1860 {
1861         struct i40e_fdir_info *fdir_info;
1862
1863         fdir_info = &pf->fdir;
1864
1865         /* flow director memory cleanup */
1866         if (fdir_info->hash_map)
1867                 rte_free(fdir_info->hash_map);
1868         if (fdir_info->hash_table)
1869                 rte_hash_free(fdir_info->hash_table);
1870         if (fdir_info->fdir_flow_pool.bitmap)
1871                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1872         if (fdir_info->fdir_flow_pool.pool)
1873                 rte_free(fdir_info->fdir_flow_pool.pool);
1874         if (fdir_info->fdir_filter_array)
1875                 rte_free(fdir_info->fdir_filter_array);
1876 }
1877
1878 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1879 {
1880         /*
1881          * Disable flexible payload by default
1882          * for the corresponding L2/L3/L4 layers.
1883          */
1884         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1885         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1886         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1887 }
1888
1889 static int
1890 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1891 {
1892         struct i40e_hw *hw;
1893
1894         PMD_INIT_FUNC_TRACE();
1895
1896         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1897                 return 0;
1898
1899         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1900
1901         if (hw->adapter_closed == 0)
1902                 i40e_dev_close(dev);
1903
1904         return 0;
1905 }
1906
1907 static int
1908 i40e_dev_configure(struct rte_eth_dev *dev)
1909 {
1910         struct i40e_adapter *ad =
1911                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1912         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1913         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1914         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1915         int i, ret;
1916
1917         ret = i40e_dev_sync_phy_type(hw);
1918         if (ret)
1919                 return ret;
1920
1921         /* Initialize to TRUE. If any Rx queue doesn't meet the
1922          * bulk allocation or vector Rx preconditions, we will reset it.
1923          */
1924         ad->rx_bulk_alloc_allowed = true;
1925         ad->rx_vec_allowed = true;
1926         ad->tx_simple_allowed = true;
1927         ad->tx_vec_allowed = true;
1928
1929         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1930                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1931
1932         /* Only legacy filter API needs the following fdir config. So when the
1933          * legacy filter API is deprecated, the following code should also be
1934          * removed.
1935          */
1936         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1937                 ret = i40e_fdir_setup(pf);
1938                 if (ret != I40E_SUCCESS) {
1939                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1940                         return -ENOTSUP;
1941                 }
1942                 ret = i40e_fdir_configure(dev);
1943                 if (ret < 0) {
1944                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1945                         goto err;
1946                 }
1947         } else
1948                 i40e_fdir_teardown(pf);
1949
1950         ret = i40e_dev_init_vlan(dev);
1951         if (ret < 0)
1952                 goto err;
1953
1954         /* VMDQ setup.
1955          *  The general PMD call sequence is NIC init, configure,
1956          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
1957          *  the VSI that a specific queue belongs to when VMDQ is
1958          *  applicable, so VMDQ setup has to be done before
1959          *  rx/tx_queue_setup(), which makes this function a good place
1960          *  for vmdq_setup.
1961          *  RSS setup needs the actual number of configured RX queues,
1962          *  only known after rx_queue_setup(), so it belongs in dev_start().
1963          */
1964         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1965                 ret = i40e_vmdq_setup(dev);
1966                 if (ret)
1967                         goto err;
1968         }
1969
1970         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1971                 ret = i40e_dcb_setup(dev);
1972                 if (ret) {
1973                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1974                         goto err_dcb;
1975                 }
1976         }
1977
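        /* Track flows created through the generic rte_flow API */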
1978         TAILQ_INIT(&pf->flow_list);
1979
1980         return 0;
1981
1982 err_dcb:
1983         /* need to release vmdq resource if exists */
1984         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1985                 i40e_vsi_release(pf->vmdq[i].vsi);
1986                 pf->vmdq[i].vsi = NULL;
1987         }
1988         rte_free(pf->vmdq);
1989         pf->vmdq = NULL;
1990 err:
1991         /* Need to release fdir resource if exists.
1992          * Only legacy filter API needs the following fdir config. So when the
1993          * legacy filter API is deprecated, the following code should also be
1994          * removed.
1995          */
1996         i40e_fdir_teardown(pf);
1997         return ret;
1998 }
1999
2000 void
2001 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
2002 {
2003         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2004         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2005         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2006         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2007         uint16_t msix_vect = vsi->msix_intr;
2008         uint16_t i;
2009
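        /* Writing zero to QINT_TQCTL/QINT_RQCTL detaches the queue from
         * its interrupt vector and disables the interrupt cause.
         */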
2010         for (i = 0; i < vsi->nb_qps; i++) {
2011                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2012                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2013                 rte_wmb();
2014         }
2015
2016         if (vsi->type != I40E_VSI_SRIOV) {
2017                 if (!rte_intr_allow_others(intr_handle)) {
2018                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2019                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2020                         I40E_WRITE_REG(hw,
2021                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2022                                        0);
2023                 } else {
2024                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2025                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2026                         I40E_WRITE_REG(hw,
2027                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2028                                                        msix_vect - 1), 0);
2029                 }
2030         } else {
2031                 uint32_t reg;
2032                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2033                         vsi->user_param + (msix_vect - 1);
2034
2035                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2036                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2037         }
2038         I40E_WRITE_FLUSH(hw);
2039 }
2040
2041 static void
2042 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2043                        int base_queue, int nb_queue,
2044                        uint16_t itr_idx)
2045 {
2046         int i;
2047         uint32_t val;
2048         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2049         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2050
2051         /* Bind all RX queues to allocated MSIX interrupt */
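        /* The RQCTL registers of one vector form a linked list: NEXTQ_INDX
         * points to the following queue and the last entry is terminated
         * with the NEXTQ_INDX mask (end-of-list) below.
         */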
2052         for (i = 0; i < nb_queue; i++) {
2053                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2054                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2055                         ((base_queue + i + 1) <<
2056                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2057                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2058                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2059
2060                 if (i == nb_queue - 1)
2061                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2062                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2063         }
2064
2065         /* Write first RX queue to Link list register as the head element */
2066         if (vsi->type != I40E_VSI_SRIOV) {
2067                 uint16_t interval =
2068                         i40e_calc_itr_interval(1, pf->support_multi_driver);
2069
2070                 if (msix_vect == I40E_MISC_VEC_ID) {
2071                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2072                                        (base_queue <<
2073                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2074                                        (0x0 <<
2075                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2076                         I40E_WRITE_REG(hw,
2077                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2078                                        interval);
2079                 } else {
2080                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2081                                        (base_queue <<
2082                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2083                                        (0x0 <<
2084                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2085                         I40E_WRITE_REG(hw,
2086                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2087                                                        msix_vect - 1),
2088                                        interval);
2089                 }
2090         } else {
2091                 uint32_t reg;
2092
2093                 if (msix_vect == I40E_MISC_VEC_ID) {
2094                         I40E_WRITE_REG(hw,
2095                                        I40E_VPINT_LNKLST0(vsi->user_param),
2096                                        (base_queue <<
2097                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2098                                        (0x0 <<
2099                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2100                 } else {
2101                         /* num_msix_vectors_vf includes irq0, so subtract it */
2102                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2103                                 vsi->user_param + (msix_vect - 1);
2104
2105                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2106                                        (base_queue <<
2107                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2108                                        (0x0 <<
2109                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2110                 }
2111         }
2112
2113         I40E_WRITE_FLUSH(hw);
2114 }
2115
2116 int
2117 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2118 {
2119         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2120         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2121         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2122         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2123         uint16_t msix_vect = vsi->msix_intr;
2124         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2125         uint16_t queue_idx = 0;
2126         int record = 0;
2127         int i;
2128
2129         for (i = 0; i < vsi->nb_qps; i++) {
2130                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2131                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2132         }
2133
2134         /* VF bind interrupt */
2135         if (vsi->type == I40E_VSI_SRIOV) {
2136                 if (vsi->nb_msix == 0) {
2137                         PMD_DRV_LOG(ERR, "No msix resource");
2138                         return -EINVAL;
2139                 }
2140                 __vsi_queues_bind_intr(vsi, msix_vect,
2141                                        vsi->base_queue, vsi->nb_qps,
2142                                        itr_idx);
2143                 return 0;
2144         }
2145
2146         /* PF & VMDq bind interrupt */
2147         if (rte_intr_dp_is_en(intr_handle)) {
2148                 if (vsi->type == I40E_VSI_MAIN) {
2149                         queue_idx = 0;
2150                         record = 1;
2151                 } else if (vsi->type == I40E_VSI_VMDQ2) {
2152                         struct i40e_vsi *main_vsi =
2153                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2154                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
2155                         record = 1;
2156                 }
2157         }
2158
2159         for (i = 0; i < vsi->nb_used_qps; i++) {
2160                 if (vsi->nb_msix == 0) {
2161                         PMD_DRV_LOG(ERR, "No msix resource");
2162                         return -EINVAL;
2163                 } else if (nb_msix <= 1) {
2164                         if (!rte_intr_allow_others(intr_handle))
2165                                 /* allow sharing MISC_VEC_ID */
2166                                 msix_vect = I40E_MISC_VEC_ID;
2167
2168                         /* not enough MSI-X vectors, map all queues to one */
2169                         __vsi_queues_bind_intr(vsi, msix_vect,
2170                                                vsi->base_queue + i,
2171                                                vsi->nb_used_qps - i,
2172                                                itr_idx);
2173                         for (; !!record && i < vsi->nb_used_qps; i++)
2174                                 intr_handle->intr_vec[queue_idx + i] =
2175                                         msix_vect;
2176                         break;
2177                 }
2178                 /* 1:1 queue/msix_vect mapping */
2179                 __vsi_queues_bind_intr(vsi, msix_vect,
2180                                        vsi->base_queue + i, 1,
2181                                        itr_idx);
2182                 if (!!record)
2183                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
2184
2185                 msix_vect++;
2186                 nb_msix--;
2187         }
2188
2189         return 0;
2190 }
2191
2192 void
2193 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2194 {
2195         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2196         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2197         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2198         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2199         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2200         uint16_t msix_intr, i;
2201
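        /* With dedicated per-queue vectors (and no multi-driver
         * restriction) enable each DYN_CTLN register; otherwise all
         * queues share the misc vector controlled by DYN_CTL0.
         */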
2202         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2203                 for (i = 0; i < vsi->nb_msix; i++) {
2204                         msix_intr = vsi->msix_intr + i;
2205                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2206                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2207                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2208                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2209                 }
2210         else
2211                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2212                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2213                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2214                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2215
2216         I40E_WRITE_FLUSH(hw);
2217 }
2218
2219 void
2220 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2221 {
2222         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2223         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2224         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2225         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2226         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2227         uint16_t msix_intr, i;
2228
2229         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2230                 for (i = 0; i < vsi->nb_msix; i++) {
2231                         msix_intr = vsi->msix_intr + i;
2232                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2233                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2234                 }
2235         else
2236                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2237                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2238
2239         I40E_WRITE_FLUSH(hw);
2240 }
2241
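/* Translate the ethdev ETH_LINK_SPEED_* bitmap into an i40e AQ link-speed bitmap */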
2242 static inline uint8_t
2243 i40e_parse_link_speeds(uint16_t link_speeds)
2244 {
2245         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2246
2247         if (link_speeds & ETH_LINK_SPEED_40G)
2248                 link_speed |= I40E_LINK_SPEED_40GB;
2249         if (link_speeds & ETH_LINK_SPEED_25G)
2250                 link_speed |= I40E_LINK_SPEED_25GB;
2251         if (link_speeds & ETH_LINK_SPEED_20G)
2252                 link_speed |= I40E_LINK_SPEED_20GB;
2253         if (link_speeds & ETH_LINK_SPEED_10G)
2254                 link_speed |= I40E_LINK_SPEED_10GB;
2255         if (link_speeds & ETH_LINK_SPEED_1G)
2256                 link_speed |= I40E_LINK_SPEED_1GB;
2257         if (link_speeds & ETH_LINK_SPEED_100M)
2258                 link_speed |= I40E_LINK_SPEED_100MB;
2259
2260         return link_speed;
2261 }
2262
2263 static int
2264 i40e_phy_conf_link(struct i40e_hw *hw,
2265                    uint8_t abilities,
2266                    uint8_t force_speed,
2267                    bool is_up)
2268 {
2269         enum i40e_status_code status;
2270         struct i40e_aq_get_phy_abilities_resp phy_ab;
2271         struct i40e_aq_set_phy_config phy_conf;
2272         enum i40e_aq_phy_type cnt;
2273         uint8_t avail_speed;
2274         uint32_t phy_type_mask = 0;
2275
2276         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2277                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2278                         I40E_AQ_PHY_FLAG_LOW_POWER;
2280         int ret = -ENOTSUP;
2281
2282         /* Get the PHY capabilities to learn the available speeds. */
2283         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2284                                               NULL);
2285         if (status) {
2286                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2287                                 status);
2288                 return ret;
2289         }
2290         avail_speed = phy_ab.link_speed;
2291
2292         /* To get the current phy config. */
2293         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2294                                               NULL);
2295         if (status) {
2296                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2297                                 status);
2298                 return ret;
2299         }
2300
2301         /* If the link needs to go up, autoneg is enabled and a speed is
2302          * already configured, there is no need to set it up again.
2303          */
2304         if (is_up && phy_ab.phy_type != 0 &&
2305                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2306                      phy_ab.link_speed != 0)
2307                 return I40E_SUCCESS;
2308
2309         memset(&phy_conf, 0, sizeof(phy_conf));
2310
2311         /* bits 0-2 use the values from get_phy_abilities_resp */
2312         abilities &= ~mask;
2313         abilities |= phy_ab.abilities & mask;
2314
2315         phy_conf.abilities = abilities;
2316
2317         /* If link needs to go up, but the force speed is not supported,
2318          * warn users and configure the default available speeds.
2319          */
2320         if (is_up && !(force_speed & avail_speed)) {
2321                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2322                 phy_conf.link_speed = avail_speed;
2323         } else {
2324                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2325         }
2326
2327         /* PHY type mask needs to include each type except PHY type extension */
2328         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2329                 phy_type_mask |= 1 << cnt;
2330
2331         /* use get_phy_abilities_resp value for the rest */
2332         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2333         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2334                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2335                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2336         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2337         phy_conf.eee_capability = phy_ab.eee_capability;
2338         phy_conf.eeer = phy_ab.eeer_val;
2339         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2340
2341         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2342                     phy_ab.abilities, phy_ab.link_speed);
2343         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2344                     phy_conf.abilities, phy_conf.link_speed);
2345
2346         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2347         if (status)
2348                 return ret;
2349
2350         return I40E_SUCCESS;
2351 }
2352
2353 static int
2354 i40e_apply_link_speed(struct rte_eth_dev *dev)
2355 {
2356         uint8_t speed;
2357         uint8_t abilities = 0;
2358         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2359         struct rte_eth_conf *conf = &dev->data->dev_conf;
2360
2361         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2362                      I40E_AQ_PHY_LINK_ENABLED;
2363
2364         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
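                /* Autonegotiation: advertise every speed the PMD knows
                 * about and let the PHY pick the best supported one.
                 */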
2365                 conf->link_speeds = ETH_LINK_SPEED_40G |
2366                                     ETH_LINK_SPEED_25G |
2367                                     ETH_LINK_SPEED_20G |
2368                                     ETH_LINK_SPEED_10G |
2369                                     ETH_LINK_SPEED_1G |
2370                                     ETH_LINK_SPEED_100M;
2371
2372                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2373         } else {
2374                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2375         }
2376         speed = i40e_parse_link_speeds(conf->link_speeds);
2377
2378         return i40e_phy_conf_link(hw, abilities, speed, true);
2379 }
2380
2381 static int
2382 i40e_dev_start(struct rte_eth_dev *dev)
2383 {
2384         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2385         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2386         struct i40e_vsi *main_vsi = pf->main_vsi;
2387         int ret, i;
2388         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2389         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2390         uint32_t intr_vector = 0;
2391         struct i40e_vsi *vsi;
2392         uint16_t nb_rxq, nb_txq;
2393
2394         hw->adapter_stopped = 0;
2395
2396         rte_intr_disable(intr_handle);
2397
2398         if ((rte_intr_cap_multiple(intr_handle) ||
2399              !RTE_ETH_DEV_SRIOV(dev).active) &&
2400             dev->data->dev_conf.intr_conf.rxq != 0) {
2401                 intr_vector = dev->data->nb_rx_queues;
2402                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2403                 if (ret)
2404                         return ret;
2405         }
2406
2407         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2408                 intr_handle->intr_vec =
2409                         rte_zmalloc("intr_vec",
2410                                     dev->data->nb_rx_queues * sizeof(int),
2411                                     0);
2412                 if (!intr_handle->intr_vec) {
2413                         PMD_INIT_LOG(ERR,
2414                                 "Failed to allocate %d rx_queues intr_vec",
2415                                 dev->data->nb_rx_queues);
2416                         return -ENOMEM;
2417                 }
2418         }
2419
2420         /* Initialize VSI */
2421         ret = i40e_dev_rxtx_init(pf);
2422         if (ret != I40E_SUCCESS) {
2423                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2424                 return ret;
2425         }
2426
2427         /* Map queues with MSIX interrupt */
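        /* (Rx queues not claimed by VMDq VSIs belong to the main VSI) */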
2428         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2429                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2430         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2431         if (ret < 0)
2432                 return ret;
2433         i40e_vsi_enable_queues_intr(main_vsi);
2434
2435         /* Map VMDQ VSI queues with MSIX interrupt */
2436         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2437                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2438                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2439                                                 I40E_ITR_INDEX_DEFAULT);
2440                 if (ret < 0)
2441                         return ret;
2442                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2443         }
2444
2445         /* Enable all queues which have been configured */
2446         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2447                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2448                 if (ret)
2449                         goto rx_err;
2450         }
2451
2452         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2453                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2454                 if (ret)
2455                         goto tx_err;
2456         }
2457
2458         /* Enable receiving broadcast packets */
2459         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2460         if (ret != I40E_SUCCESS)
2461                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2462
2463         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2464                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2465                                                 true, NULL);
2466                 if (ret != I40E_SUCCESS)
2467                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2468         }
2469
2470         /* Enable the VLAN promiscuous mode. */
2471         if (pf->vfs) {
2472                 for (i = 0; i < pf->vf_num; i++) {
2473                         vsi = pf->vfs[i].vsi;
2474                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2475                                                      true, NULL);
2476                 }
2477         }
2478
2479         /* Configure MAC loopback mode (none or PHY-local) */
2480         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2481             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2482                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2483                 if (ret != I40E_SUCCESS) {
2484                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2485                         goto tx_err;
2486                 }
2487         }
2488
2489         /* Apply link configure */
2490         ret = i40e_apply_link_speed(dev);
2491         if (I40E_SUCCESS != ret) {
2492                 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2493                 goto tx_err;
2494         }
2495
2496         if (!rte_intr_allow_others(intr_handle)) {
2497                 rte_intr_callback_unregister(intr_handle,
2498                                              i40e_dev_interrupt_handler,
2499                                              (void *)dev);
2500                 /* configure and enable device interrupt */
2501                 i40e_pf_config_irq0(hw, FALSE);
2502                 i40e_pf_enable_irq0(hw);
2503
2504                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2505                         PMD_INIT_LOG(INFO,
2506                                 "LSC won't be enabled: no interrupt multiplexing");
2507         } else {
2508                 ret = i40e_aq_set_phy_int_mask(hw,
2509                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2510                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2511                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2512                 if (ret != I40E_SUCCESS)
2513                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2514
2515                 /* Call the get_link_info AQ command to enable/disable LSE */
2516                 i40e_dev_link_update(dev, 0);
2517         }
2518
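        /* Without Rx queue interrupts, PF interrupt causes (adminq events,
         * VF messages) are serviced by polling from a periodic alarm
         * instead of from the interrupt line.
         */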
2519         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2520                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2521                                   i40e_dev_alarm_handler, dev);
2522         } else {
2523                 /* enable uio intr after callback register */
2524                 rte_intr_enable(intr_handle);
2525         }
2526
2527         i40e_filter_restore(pf);
2528
2529         if (pf->tm_conf.root && !pf->tm_conf.committed)
2530                 PMD_DRV_LOG(WARNING,
2531                             "please call hierarchy_commit() "
2532                             "before starting the port");
2533
2534         return I40E_SUCCESS;
2535
2536 tx_err:
2537         for (i = 0; i < nb_txq; i++)
2538                 i40e_dev_tx_queue_stop(dev, i);
2539 rx_err:
2540         for (i = 0; i < nb_rxq; i++)
2541                 i40e_dev_rx_queue_stop(dev, i);
2542
2543         return ret;
2544 }
2545
2546 static int
2547 i40e_dev_stop(struct rte_eth_dev *dev)
2548 {
2549         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2550         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2551         struct i40e_vsi *main_vsi = pf->main_vsi;
2552         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2553         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2554         int i;
2555
2556         if (hw->adapter_stopped == 1)
2557                 return 0;
2558
2559         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2560                 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2561                 rte_intr_enable(intr_handle);
2562         }
2563
2564         /* Disable all queues */
2565         for (i = 0; i < dev->data->nb_tx_queues; i++)
2566                 i40e_dev_tx_queue_stop(dev, i);
2567
2568         for (i = 0; i < dev->data->nb_rx_queues; i++)
2569                 i40e_dev_rx_queue_stop(dev, i);
2570
2571         /* disable queue interrupts and unbind queues from their vectors */
2572         i40e_vsi_disable_queues_intr(main_vsi);
2573         i40e_vsi_queues_unbind_intr(main_vsi);
2574
2575         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2576                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2577                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2578         }
2579
2580         /* Clear all queues and release memory */
2581         i40e_dev_clear_queues(dev);
2582
2583         /* Set link down */
2584         i40e_dev_set_link_down(dev);
2585
2586         if (!rte_intr_allow_others(intr_handle))
2587                 /* restore the default interrupt handler */
2588                 rte_intr_callback_register(intr_handle,
2589                                            i40e_dev_interrupt_handler,
2590                                            (void *)dev);
2591
2592         /* Clean datapath event and queue/vec mapping */
2593         rte_intr_efd_disable(intr_handle);
2594         if (intr_handle->intr_vec) {
2595                 rte_free(intr_handle->intr_vec);
2596                 intr_handle->intr_vec = NULL;
2597         }
2598
2599         /* reset hierarchy commit */
2600         pf->tm_conf.committed = false;
2601
2602         hw->adapter_stopped = 1;
2603         dev->data->dev_started = 0;
2604
2605         pf->adapter->rss_reta_updated = 0;
2606
2607         return 0;
2608 }
2609
2610 static int
2611 i40e_dev_close(struct rte_eth_dev *dev)
2612 {
2613         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2614         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2615         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2616         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2617         struct i40e_mirror_rule *p_mirror;
2618         struct i40e_filter_control_settings settings;
2619         struct rte_flow *p_flow;
2620         uint32_t reg;
2621         int i;
2622         int ret;
2623         uint8_t aq_fail = 0;
2624         int retries = 0;
2625
2626         PMD_INIT_FUNC_TRACE();
2627         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2628                 return 0;
2629
2630         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2631         if (ret)
2632                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2633
2635         ret = i40e_dev_stop(dev);
2636
2637         /* Remove all mirror rules */
2638         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2639                 ret = i40e_aq_del_mirror_rule(hw,
2640                                               pf->main_vsi->veb->seid,
2641                                               p_mirror->rule_type,
2642                                               p_mirror->entries,
2643                                               p_mirror->num_entries,
2644                                               p_mirror->id);
2645                 if (ret < 0)
2646                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2647                                     "status = %d, aq_err = %d.", ret,
2648                                     hw->aq.asq_last_status);
2649
2650                 /* free the software mirror rule entry regardless of the AQ result */
2651                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2652                 rte_free(p_mirror);
2653                 pf->nb_mirror_rule--;
2654         }
2655
2656         i40e_dev_free_queues(dev);
2657
2658         /* Disable interrupt */
2659         i40e_pf_disable_irq0(hw);
2660         rte_intr_disable(intr_handle);
2661
2662         /*
2663          * Only legacy filter API needs the following fdir config. So when the
2664          * legacy filter API is deprecated, the following code should also be
2665          * removed.
2666          */
2667         i40e_fdir_teardown(pf);
2668
2669         /* shutdown and destroy the HMC */
2670         i40e_shutdown_lan_hmc(hw);
2671
2672         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2673                 i40e_vsi_release(pf->vmdq[i].vsi);
2674                 pf->vmdq[i].vsi = NULL;
2675         }
2676         rte_free(pf->vmdq);
2677         pf->vmdq = NULL;
2678
2679         /* release all the existing VSIs and VEBs */
2680         i40e_vsi_release(pf->main_vsi);
2681
2682         /* shutdown the adminq */
2683         i40e_aq_queue_shutdown(hw, true);
2684         i40e_shutdown_adminq(hw);
2685
2686         i40e_res_pool_destroy(&pf->qp_pool);
2687         i40e_res_pool_destroy(&pf->msix_pool);
2688
2689         /* Disable flexible payload in global configuration */
2690         if (!pf->support_multi_driver)
2691                 i40e_flex_payload_reg_set_default(hw);
2692
2693         /* force a PF reset to clean anything leftover */
2694         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2695         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2696                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2697         I40E_WRITE_FLUSH(hw);
2698
2699         /* Clear PXE mode */
2700         i40e_clear_pxe_mode(hw);
2701
2702         /* Unconfigure filter control */
2703         memset(&settings, 0, sizeof(settings));
2704         ret = i40e_set_filter_control(hw, &settings);
2705         if (ret)
2706                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2707                                         ret);
2708
2709         /* Disable flow control */
2710         hw->fc.requested_mode = I40E_FC_NONE;
2711         i40e_set_fc(hw, &aq_fail, TRUE);
2712
2713         /* uninitialize pf host driver */
2714         i40e_pf_host_uninit(dev);
2715
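        /*
         * Unregister the interrupt callback. -EAGAIN means a callback is
         * still executing, so retry up to 5 times with a 500 ms delay
         * between attempts.
         */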
2716         do {
2717                 ret = rte_intr_callback_unregister(intr_handle,
2718                                 i40e_dev_interrupt_handler, dev);
2719                 if (ret >= 0 || ret == -ENOENT) {
2720                         break;
2721                 } else if (ret != -EAGAIN) {
2722                         PMD_INIT_LOG(ERR,
2723                                  "intr callback unregister failed: %d",
2724                                  ret);
2725                 }
2726                 i40e_msec_delay(500);
2727         } while (retries++ < 5);
2728
2729         i40e_rm_ethtype_filter_list(pf);
2730         i40e_rm_tunnel_filter_list(pf);
2731         i40e_rm_fdir_filter_list(pf);
2732
2733         /* Remove all flows */
2734         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2735                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2736                 /* Do not free FDIR flows since they are statically allocated */
2737                 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2738                         rte_free(p_flow);
2739         }
2740
2741         /* release the statically allocated FDIR memory */
2742         i40e_fdir_memory_cleanup(pf);
2743
2744         /* Remove all Traffic Manager configuration */
2745         i40e_tm_conf_uninit(dev);
2746
2747         hw->adapter_closed = 1;
2748         return ret;
2749 }
2750
2751 /*
2752  * Reset the PF device only to re-initialize resources in the PMD layer
2753  */
2754 static int
2755 i40e_dev_reset(struct rte_eth_dev *dev)
2756 {
2757         int ret;
2758
2759         /* When a DPDK PMD PF begins to reset a PF port, it should notify all
2760          * of its VFs so that they stay in sync with it. The notification
2761          * mechanism is PMD specific and, for the i40e PF, rather complex.
2762          * To avoid unexpected VF behavior, resetting the PF while SR-IOV is
2763          * active is currently not supported. It might be supported later.
2764          */
2765         if (dev->data->sriov.active)
2766                 return -ENOTSUP;
2767
2768         ret = eth_i40e_dev_uninit(dev);
2769         if (ret)
2770                 return ret;
2771
2772         ret = eth_i40e_dev_init(dev, NULL);
2773
2774         return ret;
2775 }
2776
2777 static int
2778 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2779 {
2780         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2781         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2782         struct i40e_vsi *vsi = pf->main_vsi;
2783         int status;
2784
2785         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2786                                                      true, NULL, true);
2787         if (status != I40E_SUCCESS) {
2788                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2789                 return -EAGAIN;
2790         }
2791
2792         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2793                                                         TRUE, NULL);
2794         if (status != I40E_SUCCESS) {
2795                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2796                 /* Rollback unicast promiscuous mode */
2797                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2798                                                     false, NULL, true);
2799                 return -EAGAIN;
2800         }
2801
2802         return 0;
2803 }
2804
2805 static int
2806 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2807 {
2808         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2809         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2810         struct i40e_vsi *vsi = pf->main_vsi;
2811         int status;
2812
2813         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2814                                                      false, NULL, true);
2815         if (status != I40E_SUCCESS) {
2816                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2817                 return -EAGAIN;
2818         }
2819
2820         /* multicast promiscuous must stay enabled in all_multicast mode */
2821         if (dev->data->all_multicast == 1)
2822                 return 0;
2823
2824         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2825                                                         false, NULL);
2826         if (status != I40E_SUCCESS) {
2827                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2828                 /* Rollback unicast promiscuous mode */
2829                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2830                                                     true, NULL, true);
2831                 return -EAGAIN;
2832         }
2833
2834         return 0;
2835 }
2836
2837 static int
2838 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2839 {
2840         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2841         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2842         struct i40e_vsi *vsi = pf->main_vsi;
2843         int ret;
2844
2845         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2846         if (ret != I40E_SUCCESS) {
2847                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2848                 return -EAGAIN;
2849         }
2850
2851         return 0;
2852 }
2853
2854 static int
2855 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2856 {
2857         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2858         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2859         struct i40e_vsi *vsi = pf->main_vsi;
2860         int ret;
2861
2862         if (dev->data->promiscuous == 1)
2863                 return 0; /* promiscuous mode implies all_multicast; keep it on */
2864
2865         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2866                                 vsi->seid, FALSE, NULL);
2867         if (ret != I40E_SUCCESS) {
2868                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2869                 return -EAGAIN;
2870         }
2871
2872         return 0;
2873 }
2874
2875 /*
2876  * Set device link up.
2877  */
2878 static int
2879 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2880 {
2881         /* re-apply link speed setting */
2882         return i40e_apply_link_speed(dev);
2883 }
2884
2885 /*
2886  * Set device link down.
2887  */
2888 static int
2889 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2890 {
2891         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2892         uint8_t abilities = 0;
2893         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2894
2895         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2896         return i40e_phy_conf_link(hw, abilities, speed, false);
2897 }
2898
2899 static __rte_always_inline void
2900 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2901 {
2902 /* Link status registers and values */
2903 #define I40E_PRTMAC_LINKSTA             0x001E2420
2904 #define I40E_REG_LINK_UP                0x40000080
2905 #define I40E_PRTMAC_MACC                0x001E24E0
2906 #define I40E_REG_MACC_25GB              0x00020000
2907 #define I40E_REG_SPEED_MASK             0x38000000
2908 #define I40E_REG_SPEED_0                0x00000000
2909 #define I40E_REG_SPEED_1                0x08000000
2910 #define I40E_REG_SPEED_2                0x10000000
2911 #define I40E_REG_SPEED_3                0x18000000
2912 #define I40E_REG_SPEED_4                0x20000000
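        /*
         * The link speed is encoded in bits 27:29 of PRTMAC_LINKSTA; the
         * mapping of each code to an actual speed depends on the MAC type
         * (X722 vs. the other MACs), as decoded in the switch below.
         */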
2913         uint32_t link_speed;
2914         uint32_t reg_val;
2915
2916         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2917         link_speed = reg_val & I40E_REG_SPEED_MASK;
2918         reg_val &= I40E_REG_LINK_UP;
2919         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2920
2921         if (unlikely(link->link_status == 0))
2922                 return;
2923
2924         /* Parse the link status */
2925         switch (link_speed) {
2926         case I40E_REG_SPEED_0:
2927                 link->link_speed = ETH_SPEED_NUM_100M;
2928                 break;
2929         case I40E_REG_SPEED_1:
2930                 link->link_speed = ETH_SPEED_NUM_1G;
2931                 break;
2932         case I40E_REG_SPEED_2:
2933                 if (hw->mac.type == I40E_MAC_X722)
2934                         link->link_speed = ETH_SPEED_NUM_2_5G;
2935                 else
2936                         link->link_speed = ETH_SPEED_NUM_10G;
2937                 break;
2938         case I40E_REG_SPEED_3:
2939                 if (hw->mac.type == I40E_MAC_X722) {
2940                         link->link_speed = ETH_SPEED_NUM_5G;
2941                 } else {
2942                         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2943
2944                         if (reg_val & I40E_REG_MACC_25GB)
2945                                 link->link_speed = ETH_SPEED_NUM_25G;
2946                         else
2947                                 link->link_speed = ETH_SPEED_NUM_40G;
2948                 }
2949                 break;
2950         case I40E_REG_SPEED_4:
2951                 if (hw->mac.type == I40E_MAC_X722)
2952                         link->link_speed = ETH_SPEED_NUM_10G;
2953                 else
2954                         link->link_speed = ETH_SPEED_NUM_20G;
2955                 break;
2956         default:
2957                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2958                 break;
2959         }
2960 }
2961
2962 static __rte_always_inline void
2963 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2964         bool enable_lse, int wait_to_complete)
2965 {
2966 #define CHECK_INTERVAL             100  /* 100ms */
2967 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2968         uint32_t rep_cnt = MAX_REPEAT_TIME;
2969         struct i40e_link_status link_status;
2970         int status;
2971
2972         memset(&link_status, 0, sizeof(link_status));
2973
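        /*
         * Query the link status through the admin queue. When
         * wait_to_complete is set, poll until the link comes up, for at
         * most MAX_REPEAT_TIME * CHECK_INTERVAL = 1 s.
         */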
2974         do {
2975                 memset(&link_status, 0, sizeof(link_status));
2976
2977                 /* Get link status information from hardware */
2978                 status = i40e_aq_get_link_info(hw, enable_lse,
2979                                                 &link_status, NULL);
2980                 if (unlikely(status != I40E_SUCCESS)) {
2981                         link->link_speed = ETH_SPEED_NUM_NONE;
2982                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2983                         PMD_DRV_LOG(ERR, "Failed to get link info");
2984                         return;
2985                 }
2986
2987                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2988                 if (!wait_to_complete || link->link_status)
2989                         break;
2990
2991                 rte_delay_ms(CHECK_INTERVAL);
2992         } while (--rep_cnt);
2993
2994         /* Parse the link status */
2995         switch (link_status.link_speed) {
2996         case I40E_LINK_SPEED_100MB:
2997                 link->link_speed = ETH_SPEED_NUM_100M;
2998                 break;
2999         case I40E_LINK_SPEED_1GB:
3000                 link->link_speed = ETH_SPEED_NUM_1G;
3001                 break;
3002         case I40E_LINK_SPEED_10GB:
3003                 link->link_speed = ETH_SPEED_NUM_10G;
3004                 break;
3005         case I40E_LINK_SPEED_20GB:
3006                 link->link_speed = ETH_SPEED_NUM_20G;
3007                 break;
3008         case I40E_LINK_SPEED_25GB:
3009                 link->link_speed = ETH_SPEED_NUM_25G;
3010                 break;
3011         case I40E_LINK_SPEED_40GB:
3012                 link->link_speed = ETH_SPEED_NUM_40G;
3013                 break;
3014         default:
3015                 if (link->link_status)
3016                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3017                 else
3018                         link->link_speed = ETH_SPEED_NUM_NONE;
3019                 break;
3020         }
3021 }
3022
3023 int
3024 i40e_dev_link_update(struct rte_eth_dev *dev,
3025                      int wait_to_complete)
3026 {
3027         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3028         struct rte_eth_link link;
3029         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3030         int ret;
3031
3032         memset(&link, 0, sizeof(link));
3033
3034         /* i40e uses full duplex only */
3035         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3036         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3037                         ETH_LINK_SPEED_FIXED);
3038
3039         if (!wait_to_complete && !enable_lse)
3040                 update_link_reg(hw, &link);
3041         else
3042                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3043
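        /*
         * When this port is bound to a switch device (presumably the case
         * for representor setups), report that device's link status instead.
         */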
3044         if (hw->switch_dev)
3045                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3046
3047         ret = rte_eth_linkstatus_set(dev, &link);
3048         i40e_notify_all_vfs_link_status(dev);
3049
3050         return ret;
3051 }
3052
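/*
 * Read a 48-bit hardware counter and accumulate it into a 64-bit software
 * counter: the hardware value wraps at 2^48, so wrap-around is detected by
 * comparing against the previous reading, and the upper 16 bits are carried
 * over from *prev_stat.
 */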
3053 static void
3054 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3055                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3056                           uint64_t *stat, uint64_t *prev_stat)
3057 {
3058         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3059         /* extend the 48-bit counter to 64 bits by handling wrap-around */
3060         if (offset_loaded) {
3061                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3062                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3063                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3064         }
3065         *prev_stat = *stat;
3066 }
3067
3068 /* Get all the statistics of a VSI */
3069 void
3070 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3071 {
3072         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3073         struct i40e_eth_stats *nes = &vsi->eth_stats;
3074         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3075         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3076
3077         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3078                                   vsi->offset_loaded, &oes->rx_bytes,
3079                                   &nes->rx_bytes, &vsi->prev_rx_bytes);
3080         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3081                             vsi->offset_loaded, &oes->rx_unicast,
3082                             &nes->rx_unicast);
3083         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3084                             vsi->offset_loaded, &oes->rx_multicast,
3085                             &nes->rx_multicast);
3086         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3087                             vsi->offset_loaded, &oes->rx_broadcast,
3088                             &nes->rx_broadcast);
3089         /* exclude CRC bytes */
3090         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3091                 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3092
3093         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3094                             &oes->rx_discards, &nes->rx_discards);
3095         /* GLV_REPC not supported */
3096         /* GLV_RMPC not supported */
3097         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3098                             &oes->rx_unknown_protocol,
3099                             &nes->rx_unknown_protocol);
3100         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3101                                   vsi->offset_loaded, &oes->tx_bytes,
3102                                   &nes->tx_bytes, &vsi->prev_tx_bytes);
3103         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3104                             vsi->offset_loaded, &oes->tx_unicast,
3105                             &nes->tx_unicast);
3106         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3107                             vsi->offset_loaded, &oes->tx_multicast,
3108                             &nes->tx_multicast);
3109         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3110                             vsi->offset_loaded,  &oes->tx_broadcast,
3111                             &nes->tx_broadcast);
3112         /* GLV_TDPC not supported */
3113         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3114                             &oes->tx_errors, &nes->tx_errors);
3115         vsi->offset_loaded = true;
3116
3117         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3118                     vsi->vsi_id);
3119         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3120         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3121         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3122         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3123         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3124         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3125                     nes->rx_unknown_protocol);
3126         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3127         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3128         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3129         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3130         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3131         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3132         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3133                     vsi->vsi_id);
3134 }
3135
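/*
 * Read all port-level hardware counters. The port counters also include
 * VSI-internal (switch-local) traffic, so the internal Rx/Tx counters are
 * read first and subtracted below; CRC bytes are excluded from the byte
 * counters as well.
 */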
3136 static void
3137 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3138 {
3139         unsigned int i;
3140         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3141         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3142
3143         /* Get rx/tx bytes of internal transfer packets */
3144         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3145                                   I40E_GLV_GORCL(hw->port),
3146                                   pf->offset_loaded,
3147                                   &pf->internal_stats_offset.rx_bytes,
3148                                   &pf->internal_stats.rx_bytes,
3149                                   &pf->internal_prev_rx_bytes);
3150         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3151                                   I40E_GLV_GOTCL(hw->port),
3152                                   pf->offset_loaded,
3153                                   &pf->internal_stats_offset.tx_bytes,
3154                                   &pf->internal_stats.tx_bytes,
3155                                   &pf->internal_prev_tx_bytes);
3156         /* Get total internal rx packet count */
3157         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3158                             I40E_GLV_UPRCL(hw->port),
3159                             pf->offset_loaded,
3160                             &pf->internal_stats_offset.rx_unicast,
3161                             &pf->internal_stats.rx_unicast);
3162         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3163                             I40E_GLV_MPRCL(hw->port),
3164                             pf->offset_loaded,
3165                             &pf->internal_stats_offset.rx_multicast,
3166                             &pf->internal_stats.rx_multicast);
3167         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3168                             I40E_GLV_BPRCL(hw->port),
3169                             pf->offset_loaded,
3170                             &pf->internal_stats_offset.rx_broadcast,
3171                             &pf->internal_stats.rx_broadcast);
3172         /* Get total internal tx packet count */
3173         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3174                             I40E_GLV_UPTCL(hw->port),
3175                             pf->offset_loaded,
3176                             &pf->internal_stats_offset.tx_unicast,
3177                             &pf->internal_stats.tx_unicast);
3178         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3179                             I40E_GLV_MPTCL(hw->port),
3180                             pf->offset_loaded,
3181                             &pf->internal_stats_offset.tx_multicast,
3182                             &pf->internal_stats.tx_multicast);
3183         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3184                             I40E_GLV_BPTCL(hw->port),
3185                             pf->offset_loaded,
3186                             &pf->internal_stats_offset.tx_broadcast,
3187                             &pf->internal_stats.tx_broadcast);
3188
3189         /* exclude CRC size */
3190         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3191                 pf->internal_stats.rx_multicast +
3192                 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3193
3194         /* Get statistics of struct i40e_eth_stats */
3195         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3196                                   I40E_GLPRT_GORCL(hw->port),
3197                                   pf->offset_loaded, &os->eth.rx_bytes,
3198                                   &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3199         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3200                             I40E_GLPRT_UPRCL(hw->port),
3201                             pf->offset_loaded, &os->eth.rx_unicast,
3202                             &ns->eth.rx_unicast);
3203         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3204                             I40E_GLPRT_MPRCL(hw->port),
3205                             pf->offset_loaded, &os->eth.rx_multicast,
3206                             &ns->eth.rx_multicast);
3207         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3208                             I40E_GLPRT_BPRCL(hw->port),
3209                             pf->offset_loaded, &os->eth.rx_broadcast,
3210                             &ns->eth.rx_broadcast);
3211         /* Workaround: CRC size should not be included in byte statistics,
3212          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3213          * packet.
3214          */
3215         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3216                 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3217
3218         /* exclude internal rx bytes
3219          * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
3220          * before I40E_GLPRT_GORC[H/L], so there is a small window that
3221          * causes a negative value.
3222          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
3223          */
3224         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3225                 ns->eth.rx_bytes = 0;
3226         else
3227                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3228
3229         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3230                 ns->eth.rx_unicast = 0;
3231         else
3232                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3233
3234         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3235                 ns->eth.rx_multicast = 0;
3236         else
3237                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3238
3239         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3240                 ns->eth.rx_broadcast = 0;
3241         else
3242                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3243
3244         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3245                             pf->offset_loaded, &os->eth.rx_discards,
3246                             &ns->eth.rx_discards);
3247         /* GLPRT_REPC not supported */
3248         /* GLPRT_RMPC not supported */
3249         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3250                             pf->offset_loaded,
3251                             &os->eth.rx_unknown_protocol,
3252                             &ns->eth.rx_unknown_protocol);
3253         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3254                                   I40E_GLPRT_GOTCL(hw->port),
3255                                   pf->offset_loaded, &os->eth.tx_bytes,
3256                                   &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3257         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3258                             I40E_GLPRT_UPTCL(hw->port),
3259                             pf->offset_loaded, &os->eth.tx_unicast,
3260                             &ns->eth.tx_unicast);
3261         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3262                             I40E_GLPRT_MPTCL(hw->port),
3263                             pf->offset_loaded, &os->eth.tx_multicast,
3264                             &ns->eth.tx_multicast);
3265         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3266                             I40E_GLPRT_BPTCL(hw->port),
3267                             pf->offset_loaded, &os->eth.tx_broadcast,
3268                             &ns->eth.tx_broadcast);
3269         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3270                 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3271
3272         /* exclude internal tx bytes
3273          * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated
3274          * before I40E_GLPRT_GOTC[H/L], so there is a small window that
3275          * causes a negative value.
3276          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
3277          */
3278         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3279                 ns->eth.tx_bytes = 0;
3280         else
3281                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3282
3283         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3284                 ns->eth.tx_unicast = 0;
3285         else
3286                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3287
3288         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3289                 ns->eth.tx_multicast = 0;
3290         else
3291                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3292
3293         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3294                 ns->eth.tx_broadcast = 0;
3295         else
3296                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3297
3298         /* GLPRT_TEPC not supported */
3299
3300         /* additional port specific stats */
3301         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3302                             pf->offset_loaded, &os->tx_dropped_link_down,
3303                             &ns->tx_dropped_link_down);
3304         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3305                             pf->offset_loaded, &os->crc_errors,
3306                             &ns->crc_errors);
3307         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3308                             pf->offset_loaded, &os->illegal_bytes,
3309                             &ns->illegal_bytes);
3310         /* GLPRT_ERRBC not supported */
3311         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3312                             pf->offset_loaded, &os->mac_local_faults,
3313                             &ns->mac_local_faults);
3314         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3315                             pf->offset_loaded, &os->mac_remote_faults,
3316                             &ns->mac_remote_faults);
3317         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3318                             pf->offset_loaded, &os->rx_length_errors,
3319                             &ns->rx_length_errors);
3320         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3321                             pf->offset_loaded, &os->link_xon_rx,
3322                             &ns->link_xon_rx);
3323         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3324                             pf->offset_loaded, &os->link_xoff_rx,
3325                             &ns->link_xoff_rx);
3326         for (i = 0; i < 8; i++) {
3327                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3328                                     pf->offset_loaded,
3329                                     &os->priority_xon_rx[i],
3330                                     &ns->priority_xon_rx[i]);
3331                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3332                                     pf->offset_loaded,
3333                                     &os->priority_xoff_rx[i],
3334                                     &ns->priority_xoff_rx[i]);
3335         }
3336         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3337                             pf->offset_loaded, &os->link_xon_tx,
3338                             &ns->link_xon_tx);
3339         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3340                             pf->offset_loaded, &os->link_xoff_tx,
3341                             &ns->link_xoff_tx);
3342         for (i = 0; i < 8; i++) {
3343                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3344                                     pf->offset_loaded,
3345                                     &os->priority_xon_tx[i],
3346                                     &ns->priority_xon_tx[i]);
3347                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3348                                     pf->offset_loaded,
3349                                     &os->priority_xoff_tx[i],
3350                                     &ns->priority_xoff_tx[i]);
3351                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3352                                     pf->offset_loaded,
3353                                     &os->priority_xon_2_xoff[i],
3354                                     &ns->priority_xon_2_xoff[i]);
3355         }
3356         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3357                             I40E_GLPRT_PRC64L(hw->port),
3358                             pf->offset_loaded, &os->rx_size_64,
3359                             &ns->rx_size_64);
3360         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3361                             I40E_GLPRT_PRC127L(hw->port),
3362                             pf->offset_loaded, &os->rx_size_127,
3363                             &ns->rx_size_127);
3364         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3365                             I40E_GLPRT_PRC255L(hw->port),
3366                             pf->offset_loaded, &os->rx_size_255,
3367                             &ns->rx_size_255);
3368         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3369                             I40E_GLPRT_PRC511L(hw->port),
3370                             pf->offset_loaded, &os->rx_size_511,
3371                             &ns->rx_size_511);
3372         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3373                             I40E_GLPRT_PRC1023L(hw->port),
3374                             pf->offset_loaded, &os->rx_size_1023,
3375                             &ns->rx_size_1023);
3376         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3377                             I40E_GLPRT_PRC1522L(hw->port),
3378                             pf->offset_loaded, &os->rx_size_1522,
3379                             &ns->rx_size_1522);
3380         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3381                             I40E_GLPRT_PRC9522L(hw->port),
3382                             pf->offset_loaded, &os->rx_size_big,
3383                             &ns->rx_size_big);
3384         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3385                             pf->offset_loaded, &os->rx_undersize,
3386                             &ns->rx_undersize);
3387         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3388                             pf->offset_loaded, &os->rx_fragments,
3389                             &ns->rx_fragments);
3390         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3391                             pf->offset_loaded, &os->rx_oversize,
3392                             &ns->rx_oversize);
3393         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3394                             pf->offset_loaded, &os->rx_jabber,
3395                             &ns->rx_jabber);
3396         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3397                             I40E_GLPRT_PTC64L(hw->port),
3398                             pf->offset_loaded, &os->tx_size_64,
3399                             &ns->tx_size_64);
3400         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3401                             I40E_GLPRT_PTC127L(hw->port),
3402                             pf->offset_loaded, &os->tx_size_127,
3403                             &ns->tx_size_127);
3404         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3405                             I40E_GLPRT_PTC255L(hw->port),
3406                             pf->offset_loaded, &os->tx_size_255,
3407                             &ns->tx_size_255);
3408         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3409                             I40E_GLPRT_PTC511L(hw->port),
3410                             pf->offset_loaded, &os->tx_size_511,
3411                             &ns->tx_size_511);
3412         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3413                             I40E_GLPRT_PTC1023L(hw->port),
3414                             pf->offset_loaded, &os->tx_size_1023,
3415                             &ns->tx_size_1023);
3416         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3417                             I40E_GLPRT_PTC1522L(hw->port),
3418                             pf->offset_loaded, &os->tx_size_1522,
3419                             &ns->tx_size_1522);
3420         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3421                             I40E_GLPRT_PTC9522L(hw->port),
3422                             pf->offset_loaded, &os->tx_size_big,
3423                             &ns->tx_size_big);
3424         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3425                            pf->offset_loaded,
3426                            &os->fd_sb_match, &ns->fd_sb_match);
3427         /* GLPRT_MSPDC not supported */
3428         /* GLPRT_XEC not supported */
3429
3430         pf->offset_loaded = true;
3431
3432         if (pf->main_vsi)
3433                 i40e_update_vsi_stats(pf->main_vsi);
3434 }
3435
3436 /* Get all statistics of a port */
3437 static int
3438 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3439 {
3440         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3441         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3442         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3443         struct i40e_vsi *vsi;
3444         unsigned i;
3445
3446         /* read the hardware registers to refresh the stats structures */
3447         i40e_read_stats_registers(pf, hw);
3448
3449         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3450                         pf->main_vsi->eth_stats.rx_multicast +
3451                         pf->main_vsi->eth_stats.rx_broadcast -
3452                         pf->main_vsi->eth_stats.rx_discards;
3453         stats->opackets = ns->eth.tx_unicast +
3454                         ns->eth.tx_multicast +
3455                         ns->eth.tx_broadcast;
3456         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3457         stats->obytes   = ns->eth.tx_bytes;
3458         stats->oerrors  = ns->eth.tx_errors +
3459                         pf->main_vsi->eth_stats.tx_errors;
3460
3461         /* Rx Errors */
3462         stats->imissed  = ns->eth.rx_discards +
3463                         pf->main_vsi->eth_stats.rx_discards;
3464         stats->ierrors  = ns->crc_errors +
3465                         ns->rx_length_errors + ns->rx_undersize +
3466                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3467
3468         if (pf->vfs) {
3469                 for (i = 0; i < pf->vf_num; i++) {
3470                         vsi = pf->vfs[i].vsi;
3471                         i40e_update_vsi_stats(vsi);
3472
3473                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3474                                         vsi->eth_stats.rx_multicast +
3475                                         vsi->eth_stats.rx_broadcast -
3476                                         vsi->eth_stats.rx_discards);
3477                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3478                         stats->oerrors  += vsi->eth_stats.tx_errors;
3479                         stats->imissed  += vsi->eth_stats.rx_discards;
3480                 }
3481         }
3482
3483         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3484         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3485         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3486         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3487         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3488         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3489         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3490                     ns->eth.rx_unknown_protocol);
3491         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3492         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3493         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3494         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3495         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3496         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3497
3498         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3499                     ns->tx_dropped_link_down);
3500         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3501         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3502                     ns->illegal_bytes);
3503         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3504         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3505                     ns->mac_local_faults);
3506         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3507                     ns->mac_remote_faults);
3508         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3509                     ns->rx_length_errors);
3510         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3511         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3512         for (i = 0; i < 8; i++) {
3513                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3514                                 i, ns->priority_xon_rx[i]);
3515                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3516                                 i, ns->priority_xoff_rx[i]);
3517         }
3518         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3519         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3520         for (i = 0; i < 8; i++) {
3521                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3522                                 i, ns->priority_xon_tx[i]);
3523                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3524                                 i, ns->priority_xoff_tx[i]);
3525                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3526                                 i, ns->priority_xon_2_xoff[i]);
3527         }
3528         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3529         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3530         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3531         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3532         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3533         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3534         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3535         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3536         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3537         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3538         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3539         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3540         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3541         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3542         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3543         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3544         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3545         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3546         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3547                         ns->mac_short_packet_dropped);
3548         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3549                     ns->checksum_error);
3550         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3551         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3552         return 0;
3553 }
3554
3555 /* Reset the statistics */
3556 static int
3557 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3558 {
3559         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3560         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3561
3562         /* Mark PF and VSI stats to update the offset, aka "reset" */
3563         pf->offset_loaded = false;
3564         if (pf->main_vsi)
3565                 pf->main_vsi->offset_loaded = false;
3566
3567         /* read the stats; the current register values become the new offsets */
3568         i40e_read_stats_registers(pf, hw);
3569
3570         return 0;
3571 }
3572
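/*
 * Total number of xstats: the ethernet and port-level counters, plus one
 * Rx and one Tx priority counter set for each of the 8 traffic priorities.
 */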
3573 static uint32_t
3574 i40e_xstats_calc_num(void)
3575 {
3576         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3577                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3578                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3579 }
3580
3581 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3582                                      struct rte_eth_xstat_name *xstats_names,
3583                                      __rte_unused unsigned limit)
3584 {
3585         unsigned count = 0;
3586         unsigned i, prio;
3587
3588         if (xstats_names == NULL)
3589                 return i40e_xstats_calc_num();
3590
3591         /* Note: limit is checked in rte_eth_xstats_get_names() */
3592
3593         /* Get stats from i40e_eth_stats struct */
3594         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3595                 strlcpy(xstats_names[count].name,
3596                         rte_i40e_stats_strings[i].name,
3597                         sizeof(xstats_names[count].name));
3598                 count++;
3599         }
3600
3601         /* Get individual stats from the i40e_hw_port struct */
3602         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3603                 strlcpy(xstats_names[count].name,
3604                         rte_i40e_hw_port_strings[i].name,
3605                         sizeof(xstats_names[count].name));
3606                 count++;
3607         }
3608
3609         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3610                 for (prio = 0; prio < 8; prio++) {
3611                         snprintf(xstats_names[count].name,
3612                                  sizeof(xstats_names[count].name),
3613                                  "rx_priority%u_%s", prio,
3614                                  rte_i40e_rxq_prio_strings[i].name);
3615                         count++;
3616                 }
3617         }
3618
3619         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3620                 for (prio = 0; prio < 8; prio++) {
3621                         snprintf(xstats_names[count].name,
3622                                  sizeof(xstats_names[count].name),
3623                                  "tx_priority%u_%s", prio,
3624                                  rte_i40e_txq_prio_strings[i].name);
3625                         count++;
3626                 }
3627         }
3628         return count;
3629 }
3630
3631 static int
3632 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3633                     unsigned n)
3634 {
3635         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3636         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3637         unsigned i, count, prio;
3638         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3639
3640         count = i40e_xstats_calc_num();
3641         if (n < count)
3642                 return count;
3643
3644         i40e_read_stats_registers(pf, hw);
3645
3646         if (xstats == NULL)
3647                 return 0;
3648
3649         count = 0;
3650
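        /*
         * Each entry in the rte_i40e_*_strings tables stores the byte
         * offset of its counter within the stats structure; values are
         * fetched by adding that offset to the structure's base address.
         */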
3651         /* Get stats from i40e_eth_stats struct */
3652         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3653                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3654                         rte_i40e_stats_strings[i].offset);
3655                 xstats[count].id = count;
3656                 count++;
3657         }
3658
3659         /* Get individual stats from the i40e_hw_port struct */
3660         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3661                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3662                         rte_i40e_hw_port_strings[i].offset);
3663                 xstats[count].id = count;
3664                 count++;
3665         }
3666
3667         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3668                 for (prio = 0; prio < 8; prio++) {
3669                         xstats[count].value =
3670                                 *(uint64_t *)(((char *)hw_stats) +
3671                                 rte_i40e_rxq_prio_strings[i].offset +
3672                                 (sizeof(uint64_t) * prio));
3673                         xstats[count].id = count;
3674                         count++;
3675                 }
3676         }
3677
3678         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3679                 for (prio = 0; prio < 8; prio++) {
3680                         xstats[count].value =
3681                                 *(uint64_t *)(((char *)hw_stats) +
3682                                 rte_i40e_txq_prio_strings[i].offset +
3683                                 (sizeof(uint64_t) * prio));
3684                         xstats[count].id = count;
3685                         count++;
3686                 }
3687         }
3688
3689         return count;
3690 }
3691
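/*
 * Build the firmware version string. The OEM version dword packs the
 * major version in bits 31:24, the build number in bits 23:8 and the
 * patch level in bits 7:0.
 */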
3692 static int
3693 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3694 {
3695         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3696         u32 full_ver;
3697         u8 ver, patch;
3698         u16 build;
3699         int ret;
3700
3701         full_ver = hw->nvm.oem_ver;
3702         ver = (u8)(full_ver >> 24);
3703         build = (u16)((full_ver >> 8) & 0xffff);
3704         patch = (u8)(full_ver & 0xff);
3705
3706         ret = snprintf(fw_version, fw_size,
3707                  "%d.%d%d 0x%08x %d.%d.%d",
3708                  ((hw->nvm.version >> 12) & 0xf),
3709                  ((hw->nvm.version >> 4) & 0xff),
3710                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3711                  ver, build, patch);
3712
3713         ret += 1; /* add the size of '\0' */
3714         if (fw_size < (u32)ret)
3715                 return ret;
3716         else
3717                 return 0;
3718 }
3719
3720 /*
3721  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3722  * the Rx data path does not hang if FW LLDP is stopped.
3723  * Return true if LLDP needs to be stopped.
3724  * Return false if LLDP must stay enabled to avoid blocking the Rx data path.
3725  */
3726 static bool
3727 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3728 {
3729         double nvm_ver;
3730         char ver_str[64] = {0};
3731         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3732
3733         i40e_fw_version_get(dev, ver_str, 64);
3734         nvm_ver = atof(ver_str);
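        /*
         * Compare as scaled integers (version * 1000) to avoid a direct
         * floating-point comparison of the parsed NVM version.
         */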
3735         if ((hw->mac.type == I40E_MAC_X722 ||
3736              hw->mac.type == I40E_MAC_X722_VF) &&
3737              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3738                 return true;
3739         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3740                 return true;
3741
3742         return false;
3743 }
3744
3745 static int
3746 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3747 {
3748         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3749         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3750         struct i40e_vsi *vsi = pf->main_vsi;
3751         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3752
3753         dev_info->max_rx_queues = vsi->nb_qps;
3754         dev_info->max_tx_queues = vsi->nb_qps;
3755         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3756         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3757         dev_info->max_mac_addrs = vsi->max_macaddrs;
3758         dev_info->max_vfs = pci_dev->max_vfs;
3759         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3760         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3761         dev_info->rx_queue_offload_capa = 0;
3762         dev_info->rx_offload_capa =
3763                 DEV_RX_OFFLOAD_VLAN_STRIP |
3764                 DEV_RX_OFFLOAD_QINQ_STRIP |
3765                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3766                 DEV_RX_OFFLOAD_UDP_CKSUM |
3767                 DEV_RX_OFFLOAD_TCP_CKSUM |
3768                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3769                 DEV_RX_OFFLOAD_KEEP_CRC |
3770                 DEV_RX_OFFLOAD_SCATTER |
3771                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3772                 DEV_RX_OFFLOAD_VLAN_FILTER |
3773                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3774                 DEV_RX_OFFLOAD_RSS_HASH;
3775
3776         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3777         dev_info->tx_offload_capa =
3778                 DEV_TX_OFFLOAD_VLAN_INSERT |
3779                 DEV_TX_OFFLOAD_QINQ_INSERT |
3780                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3781                 DEV_TX_OFFLOAD_UDP_CKSUM |
3782                 DEV_TX_OFFLOAD_TCP_CKSUM |
3783                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3784                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3785                 DEV_TX_OFFLOAD_TCP_TSO |
3786                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3787                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3788                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3789                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3790                 DEV_TX_OFFLOAD_MULTI_SEGS |
3791                 dev_info->tx_queue_offload_capa;
3792         dev_info->dev_capa =
3793                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3794                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3795
3796         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3797                                                 sizeof(uint32_t);
3798         dev_info->reta_size = pf->hash_lut_size;
3799         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3800
3801         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3802                 .rx_thresh = {
3803                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3804                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3805                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3806                 },
3807                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3808                 .rx_drop_en = 0,
3809                 .offloads = 0,
3810         };
3811
3812         dev_info->default_txconf = (struct rte_eth_txconf) {
3813                 .tx_thresh = {
3814                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3815                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3816                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3817                 },
3818                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3819                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3820                 .offloads = 0,
3821         };
3822
3823         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3824                 .nb_max = I40E_MAX_RING_DESC,
3825                 .nb_min = I40E_MIN_RING_DESC,
3826                 .nb_align = I40E_ALIGN_RING_DESC,
3827         };
3828
3829         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3830                 .nb_max = I40E_MAX_RING_DESC,
3831                 .nb_min = I40E_MIN_RING_DESC,
3832                 .nb_align = I40E_ALIGN_RING_DESC,
3833                 .nb_seg_max = I40E_TX_MAX_SEG,
3834                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3835         };
3836
3837         if (pf->flags & I40E_FLAG_VMDQ) {
3838                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3839                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3840                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3841                                                 pf->max_nb_vmdq_vsi;
3842                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3843                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3844                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3845         }
3846
3847         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3848                 /* For XL710 */
3849                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3850                 dev_info->default_rxportconf.nb_queues = 2;
3851                 dev_info->default_txportconf.nb_queues = 2;
3852                 if (dev->data->nb_rx_queues == 1)
3853                         dev_info->default_rxportconf.ring_size = 2048;
3854                 else
3855                         dev_info->default_rxportconf.ring_size = 1024;
3856                 if (dev->data->nb_tx_queues == 1)
3857                         dev_info->default_txportconf.ring_size = 1024;
3858                 else
3859                         dev_info->default_txportconf.ring_size = 512;
3860
3861         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3862                 /* For XXV710 */
3863                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3864                 dev_info->default_rxportconf.nb_queues = 1;
3865                 dev_info->default_txportconf.nb_queues = 1;
3866                 dev_info->default_rxportconf.ring_size = 256;
3867                 dev_info->default_txportconf.ring_size = 256;
3868         } else {
3869                 /* For X710 */
3870                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3871                 dev_info->default_rxportconf.nb_queues = 1;
3872                 dev_info->default_txportconf.nb_queues = 1;
3873                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3874                         dev_info->default_rxportconf.ring_size = 512;
3875                         dev_info->default_txportconf.ring_size = 256;
3876                 } else {
3877                         dev_info->default_rxportconf.ring_size = 256;
3878                         dev_info->default_txportconf.ring_size = 256;
3879                 }
3880         }
3881         dev_info->default_rxportconf.burst_size = 32;
3882         dev_info->default_txportconf.burst_size = 32;
3883
3884         return 0;
3885 }
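
/*
 * Minimal usage sketch from the application side (port_id is a
 * hypothetical initialized port); rte_eth_dev_info_get() reaches this
 * callback through dev_ops:
 *
 *     struct rte_eth_dev_info info;
 *     if (rte_eth_dev_info_get(port_id, &info) == 0)
 *             printf("rx queues: %u, tx queues: %u\n",
 *                    info.max_rx_queues, info.max_tx_queues);
 */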
3886
3887 static int
3888 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3889 {
3890         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3891         struct i40e_vsi *vsi = pf->main_vsi;
3892         PMD_INIT_FUNC_TRACE();
3893
3894         if (on)
3895                 return i40e_vsi_add_vlan(vsi, vlan_id);
3896         else
3897                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3898 }
3899
3900 static int
3901 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3902                                 enum rte_vlan_type vlan_type,
3903                                 uint16_t tpid, int qinq)
3904 {
3905         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3906         uint64_t reg_r = 0;
3907         uint64_t reg_w = 0;
3908         uint16_t reg_id = 3;
3909         int ret;
3910
3911         if (qinq) {
3912                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3913                         reg_id = 2;
3914         }
3915
3916         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3917                                           &reg_r, NULL);
3918         if (ret != I40E_SUCCESS) {
3919                 PMD_DRV_LOG(ERR,
3920                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3921                            reg_id);
3922                 return -EIO;
3923         }
3924         PMD_DRV_LOG(DEBUG,
3925                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3926                     reg_id, reg_r);
3927
3928         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3929         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3930         if (reg_r == reg_w) {
3931                 PMD_DRV_LOG(DEBUG, "No need to write");
3932                 return 0;
3933         }
3934
3935         ret = i40e_aq_debug_write_global_register(hw,
3936                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3937                                            reg_w, NULL);
3938         if (ret != I40E_SUCCESS) {
3939                 PMD_DRV_LOG(ERR,
3940                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3941                             reg_id);
3942                 return -EIO;
3943         }
3944         PMD_DRV_LOG(DEBUG,
3945                     "Global register 0x%08x is changed with value 0x%08x",
3946                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3947
3948         return 0;
3949 }
3950
3951 static int
3952 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3953                    enum rte_vlan_type vlan_type,
3954                    uint16_t tpid)
3955 {
3956         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3957         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3958         int qinq = dev->data->dev_conf.rxmode.offloads &
3959                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3960         int ret = 0;
3961
3962         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3963              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3964             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3965                 PMD_DRV_LOG(ERR, "Unsupported vlan type.");
3967                 return -EINVAL;
3968         }
3969
3970         if (pf->support_multi_driver) {
3971                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3972                 return -ENOTSUP;
3973         }
3974
3975         /* 802.1ad frame capability was added in NVM API 1.7 */
3976         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3977                 if (qinq) {
3978                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3979                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3980                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3981                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3982                 } else {
3983                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3984                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3985                 }
3986                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3987                 if (ret != I40E_SUCCESS) {
3988                         PMD_DRV_LOG(ERR,
3989                                     "Set switch config failed aq_err: %d",
3990                                     hw->aq.asq_last_status);
3991                         ret = -EIO;
3992                 }
3993         } else
3994                 /* If NVM API < 1.7, keep the register setting */
3995                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3996                                                       tpid, qinq);
3997
3998         return ret;
3999 }
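
/*
 * Usage sketch (application side, hypothetical port_id): after enabling
 * DEV_RX_OFFLOAD_VLAN_EXTEND, set the outer TPID to 802.1ad:
 *
 *     rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
 *                                     RTE_ETHER_TYPE_QINQ);
 *
 * which lands in this callback through the ethdev vlan_tpid_set op.
 */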
4000
4001 /* Configure outer vlan stripping on or off in QinQ mode */
4002 static int
4003 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
4004 {
4005         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4006         int ret = I40E_SUCCESS;
4007         uint32_t reg;
4008
4009         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
4010                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
4011                 return -EINVAL;
4012         }
4013
4014         /* Configure for outer VLAN RX stripping */
4015         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4016
4017         if (on)
4018                 reg |= I40E_VSI_TSR_QINQ_STRIP;
4019         else
4020                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4021
4022         ret = i40e_aq_debug_write_register(hw,
4023                                                    I40E_VSI_TSR(vsi->vsi_id),
4024                                                    reg, NULL);
4025         if (ret < 0) {
4026                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4027                                     vsi->vsi_id);
4028                 return I40E_ERR_CONFIG;
4029         }
4030
4031         return ret;
4032 }
4033
4034 static int
4035 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4036 {
4037         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4038         struct i40e_vsi *vsi = pf->main_vsi;
4039         struct rte_eth_rxmode *rxmode;
4040
4041         rxmode = &dev->data->dev_conf.rxmode;
4042         if (mask & ETH_VLAN_FILTER_MASK) {
4043                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4044                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4045                 else
4046                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4047         }
4048
4049         if (mask & ETH_VLAN_STRIP_MASK) {
4050                 /* Enable or disable VLAN stripping */
4051                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4052                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4053                 else
4054                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4055         }
4056
4057         if (mask & ETH_VLAN_EXTEND_MASK) {
4058                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4059                         i40e_vsi_config_double_vlan(vsi, TRUE);
4060                         /* Set global registers with default ethertype. */
4061                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4062                                            RTE_ETHER_TYPE_VLAN);
4063                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4064                                            RTE_ETHER_TYPE_VLAN);
4065                 } else {
4066                         i40e_vsi_config_double_vlan(vsi, FALSE);
4067                 }
4068         }
4069
4070         if (mask & ETH_QINQ_STRIP_MASK) {
4071                 /* Enable or disable outer VLAN stripping */
4072                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4073                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4074                 else
4075                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4076         }
4077
4078         return 0;
4079 }
4080
4081 static void
4082 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4083                           __rte_unused uint16_t queue,
4084                           __rte_unused int on)
4085 {
4086         PMD_INIT_FUNC_TRACE();
4087 }
4088
4089 static int
4090 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4091 {
4092         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4093         struct i40e_vsi *vsi = pf->main_vsi;
4094         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4095         struct i40e_vsi_vlan_pvid_info info;
4096
4097         memset(&info, 0, sizeof(info));
4098         info.on = on;
4099         if (info.on)
4100                 info.config.pvid = pvid;
4101         else {
4102                 info.config.reject.tagged =
4103                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4104                 info.config.reject.untagged =
4105                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4106         }
4107
4108         return i40e_vsi_vlan_pvid_set(vsi, &info);
4109 }
4110
4111 static int
4112 i40e_dev_led_on(struct rte_eth_dev *dev)
4113 {
4114         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4115         uint32_t mode = i40e_led_get(hw);
4116
4117         if (mode == 0)
4118                 i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
4119
4120         return 0;
4121 }
4122
4123 static int
4124 i40e_dev_led_off(struct rte_eth_dev *dev)
4125 {
4126         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4127         uint32_t mode = i40e_led_get(hw);
4128
4129         if (mode != 0)
4130                 i40e_led_set(hw, 0, false);
4131
4132         return 0;
4133 }
4134
4135 static int
4136 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4137 {
4138         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4139         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4140
4141         fc_conf->pause_time = pf->fc_conf.pause_time;
4142
4143         /* Read out from the registers, in case they were modified by another port */
4144         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4145                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4146         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4147                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4148
4149         fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4150         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4151
4152         /* Return the current mode according to the actual setting */
4153         switch (hw->fc.current_mode) {
4154         case I40E_FC_FULL:
4155                 fc_conf->mode = RTE_FC_FULL;
4156                 break;
4157         case I40E_FC_TX_PAUSE:
4158                 fc_conf->mode = RTE_FC_TX_PAUSE;
4159                 break;
4160         case I40E_FC_RX_PAUSE:
4161                 fc_conf->mode = RTE_FC_RX_PAUSE;
4162                 break;
4163         case I40E_FC_NONE:
4164         default:
4165                 fc_conf->mode = RTE_FC_NONE;
4166         }
4167
4168         return 0;
4169 }
4170
4171 static int
4172 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4173 {
4174         uint32_t mflcn_reg, fctrl_reg, reg;
4175         uint32_t max_high_water;
4176         uint8_t i, aq_failure;
4177         int err;
4178         struct i40e_hw *hw;
4179         struct i40e_pf *pf;
4180         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4181                 [RTE_FC_NONE] = I40E_FC_NONE,
4182                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4183                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4184                 [RTE_FC_FULL] = I40E_FC_FULL
4185         };
4186
4187         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
4188
4189         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4190         if ((fc_conf->high_water > max_high_water) ||
4191                         (fc_conf->high_water < fc_conf->low_water)) {
4192                 PMD_INIT_LOG(ERR,
4193                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
4194                         max_high_water);
4195                 return -EINVAL;
4196         }
4197
4198         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4199         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4200         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4201
4202         pf->fc_conf.pause_time = fc_conf->pause_time;
4203         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4204         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4205
4206         PMD_INIT_FUNC_TRACE();
4207
4208         /* All the link flow control related enable/disable register
4209          * configuration is handled by the F/W
4210          */
4211         err = i40e_set_fc(hw, &aq_failure, true);
4212         if (err < 0)
4213                 return -ENOSYS;
4214
4215         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4216                 /* Configure flow control refresh threshold,
4217                  * the value for stat_tx_pause_refresh_timer[8]
4218                  * is used for global pause operation.
4219                  */
4220
4221                 I40E_WRITE_REG(hw,
4222                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4223                                pf->fc_conf.pause_time);
4224
4225                 /* configure the timer value included in transmitted pause
4226                  * frame,
4227                  * the value for stat_tx_pause_quanta[8] is used for global
4228                  * pause operation
4229                  */
4230                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4231                                pf->fc_conf.pause_time);
4232
4233                 fctrl_reg = I40E_READ_REG(hw,
4234                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4235
4236                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4237                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4238                 else
4239                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4240
4241                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4242                                fctrl_reg);
4243         } else {
4244                 /* Configure pause time (2 TCs per register) */
4245                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4246                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4247                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4248
4249                 /* Configure flow control refresh threshold value */
4250                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4251                                pf->fc_conf.pause_time / 2);
4252
4253                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4254
4255                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
4256                  * depending on the configuration
4257                  */
4258                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4259                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4260                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4261                 } else {
4262                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4263                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4264                 }
4265
4266                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4267         }
4268
4269         if (!pf->support_multi_driver) {
4270                 /* Configure watermarks based on both packets and bytes */
4271                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4272                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4273                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4274                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4275                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4276                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4277                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4278                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4279                                   << I40E_KILOSHIFT);
4280                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4281                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4282                                    << I40E_KILOSHIFT);
4283         } else {
4284                 PMD_DRV_LOG(ERR,
4285                             "Water marker configuration is not supported.");
4286         }
4287
4288         I40E_WRITE_FLUSH(hw);
4289
4290         return 0;
4291 }
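
/*
 * Worked example for the watermark programming above, using the driver
 * defaults: high_water = 0xF2000 >> I40E_KILOSHIFT = 968 KB, so
 *
 *     GHW (bytes)   = 968 << 10 = 0xF2000
 *     PHW (packets) = (968 << 10) / I40E_PACKET_AVERAGE_SIZE = 7744
 */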
4292
4293 static int
4294 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4295                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4296 {
4297         PMD_INIT_FUNC_TRACE();
4298
4299         return -ENOSYS;
4300 }
4301
4302 /* Add a MAC address, and update filters */
4303 static int
4304 i40e_macaddr_add(struct rte_eth_dev *dev,
4305                  struct rte_ether_addr *mac_addr,
4306                  __rte_unused uint32_t index,
4307                  uint32_t pool)
4308 {
4309         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4310         struct i40e_mac_filter_info mac_filter;
4311         struct i40e_vsi *vsi;
4312         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4313         int ret;
4314
4315         /* If VMDQ not enabled or configured, return */
4316         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4317                           !pf->nb_cfg_vmdq_vsi)) {
4318                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4319                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4320                         pool);
4321                 return -ENOTSUP;
4322         }
4323
4324         if (pool > pf->nb_cfg_vmdq_vsi) {
4325                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4326                                 pool, pf->nb_cfg_vmdq_vsi);
4327                 return -EINVAL;
4328         }
4329
4330         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4331         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4332                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4333         else
4334                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
4335
4336         if (pool == 0)
4337                 vsi = pf->main_vsi;
4338         else
4339                 vsi = pf->vmdq[pool - 1].vsi;
4340
4341         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4342         if (ret != I40E_SUCCESS) {
4343                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4344                 return -ENODEV;
4345         }
4346         return 0;
4347 }
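
/*
 * Usage sketch (application side, hypothetical address): add a unicast
 * MAC to the default pool 0:
 *
 *     struct rte_ether_addr addr =
 *             {{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}};
 *     rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 *
 * A non-zero pool is only accepted once VMDq is enabled and configured.
 */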
4348
4349 /* Remove a MAC address, and update filters */
4350 static void
4351 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4352 {
4353         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4354         struct i40e_vsi *vsi;
4355         struct rte_eth_dev_data *data = dev->data;
4356         struct rte_ether_addr *macaddr;
4357         int ret;
4358         uint32_t i;
4359         uint64_t pool_sel;
4360
4361         macaddr = &(data->mac_addrs[index]);
4362
4363         pool_sel = dev->data->mac_pool_sel[index];
4364
4365         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4366                 if (pool_sel & (1ULL << i)) {
4367                         if (i == 0)
4368                                 vsi = pf->main_vsi;
4369                         else {
4370                                 /* No VMDQ pool enabled or configured */
4371                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4372                                         (i > pf->nb_cfg_vmdq_vsi)) {
4373                                         PMD_DRV_LOG(ERR,
4374                                                 "No VMDQ pool enabled/configured");
4375                                         return;
4376                                 }
4377                                 vsi = pf->vmdq[i - 1].vsi;
4378                         }
4379                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4380
4381                         if (ret) {
4382                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4383                                 return;
4384                         }
4385                 }
4386         }
4387 }
4388
4389 static int
4390 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4391 {
4392         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4393         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4394         uint32_t reg;
4395         int ret;
4396
4397         if (!lut)
4398                 return -EINVAL;
4399
4400         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4401                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4402                                           vsi->type != I40E_VSI_SRIOV,
4403                                           lut, lut_size);
4404                 if (ret) {
4405                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4406                         return ret;
4407                 }
4408         } else {
4409                 uint32_t *lut_dw = (uint32_t *)lut;
4410                 uint16_t i, lut_size_dw = lut_size / 4;
4411
4412                 if (vsi->type == I40E_VSI_SRIOV) {
4413                         for (i = 0; i < lut_size_dw; i++) {
4414                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4415                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4416                         }
4417                 } else {
4418                         for (i = 0; i < lut_size_dw; i++)
4419                                 lut_dw[i] = I40E_READ_REG(hw,
4420                                                           I40E_PFQF_HLUT(i));
4421                 }
4422         }
4423
4424         return 0;
4425 }
4426
4427 int
4428 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4429 {
4430         struct i40e_pf *pf;
4431         struct i40e_hw *hw;
4432         int ret;
4433
4434         if (!vsi || !lut)
4435                 return -EINVAL;
4436
4437         pf = I40E_VSI_TO_PF(vsi);
4438         hw = I40E_VSI_TO_HW(vsi);
4439
4440         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4441                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4442                                           vsi->type != I40E_VSI_SRIOV,
4443                                           lut, lut_size);
4444                 if (ret) {
4445                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4446                         return ret;
4447                 }
4448         } else {
4449                 uint32_t *lut_dw = (uint32_t *)lut;
4450                 uint16_t i, lut_size_dw = lut_size / 4;
4451
4452                 if (vsi->type == I40E_VSI_SRIOV) {
4453                         for (i = 0; i < lut_size_dw; i++)
4454                                 I40E_WRITE_REG(
4455                                         hw,
4456                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4457                                         lut_dw[i]);
4458                 } else {
4459                         for (i = 0; i < lut_size_dw; i++)
4460                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4461                                                lut_dw[i]);
4462                 }
4463                 I40E_WRITE_FLUSH(hw);
4464         }
4465
4466         return 0;
4467 }
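
/*
 * The LUT is moved four entries per register: bytes lut[0..3] map to
 * HLUT(0), lut[4..7] to HLUT(1), and so on, because lut_dw aliases the
 * byte array as 32-bit words; lut_size is therefore expected to be a
 * multiple of 4.
 */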
4468
4469 static int
4470 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4471                          struct rte_eth_rss_reta_entry64 *reta_conf,
4472                          uint16_t reta_size)
4473 {
4474         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4475         uint16_t i, lut_size = pf->hash_lut_size;
4476         uint16_t idx, shift;
4477         uint8_t *lut;
4478         int ret;
4479
4480         if (reta_size != lut_size ||
4481                 reta_size > ETH_RSS_RETA_SIZE_512) {
4482                 PMD_DRV_LOG(ERR,
4483                         "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4484                         reta_size, lut_size);
4485                 return -EINVAL;
4486         }
4487
4488         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4489         if (!lut) {
4490                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4491                 return -ENOMEM;
4492         }
4493         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4494         if (ret)
4495                 goto out;
4496         for (i = 0; i < reta_size; i++) {
4497                 idx = i / RTE_RETA_GROUP_SIZE;
4498                 shift = i % RTE_RETA_GROUP_SIZE;
4499                 if (reta_conf[idx].mask & (1ULL << shift))
4500                         lut[i] = reta_conf[idx].reta[shift];
4501         }
4502         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4503
4504         pf->adapter->rss_reta_updated = 1;
4505
4506 out:
4507         rte_free(lut);
4508
4509         return ret;
4510 }
4511
4512 static int
4513 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4514                         struct rte_eth_rss_reta_entry64 *reta_conf,
4515                         uint16_t reta_size)
4516 {
4517         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4518         uint16_t i, lut_size = pf->hash_lut_size;
4519         uint16_t idx, shift;
4520         uint8_t *lut;
4521         int ret;
4522
4523         if (reta_size != lut_size ||
4524                 reta_size > ETH_RSS_RETA_SIZE_512) {
4525                 PMD_DRV_LOG(ERR,
4526                         "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4527                         reta_size, lut_size);
4528                 return -EINVAL;
4529         }
4530
4531         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4532         if (!lut) {
4533                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4534                 return -ENOMEM;
4535         }
4536
4537         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4538         if (ret)
4539                 goto out;
4540         for (i = 0; i < reta_size; i++) {
4541                 idx = i / RTE_RETA_GROUP_SIZE;
4542                 shift = i % RTE_RETA_GROUP_SIZE;
4543                 if (reta_conf[idx].mask & (1ULL << shift))
4544                         reta_conf[idx].reta[shift] = lut[i];
4545         }
4546
4547 out:
4548         rte_free(lut);
4549
4550         return ret;
4551 }
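
/*
 * Usage sketch (application side, hypothetical sizes): steer every RETA
 * slot to queue 0 on a device whose LUT holds 512 entries:
 *
 *     struct rte_eth_rss_reta_entry64 conf[512 / RTE_RETA_GROUP_SIZE];
 *     unsigned int g;
 *
 *     memset(conf, 0, sizeof(conf));             /* reta[] = queue 0 */
 *     for (g = 0; g < RTE_DIM(conf); g++)
 *             conf[g].mask = UINT64_MAX;         /* update all 64 slots */
 *     rte_eth_dev_rss_reta_update(port_id, conf, 512);
 *
 * reta_size must equal pf->hash_lut_size or the request is rejected.
 */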
4552
4553 /**
4554  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4555  * @hw:   pointer to the HW structure
4556  * @mem:  pointer to mem struct to fill out
4557  * @size: size of memory requested
4558  * @alignment: what to align the allocation to
4559  **/
4560 enum i40e_status_code
4561 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4562                         struct i40e_dma_mem *mem,
4563                         u64 size,
4564                         u32 alignment)
4565 {
4566         const struct rte_memzone *mz = NULL;
4567         char z_name[RTE_MEMZONE_NAMESIZE];
4568
4569         if (!mem)
4570                 return I40E_ERR_PARAM;
4571
4572         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4573         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4574                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4575         if (!mz)
4576                 return I40E_ERR_NO_MEMORY;
4577
4578         mem->size = size;
4579         mem->va = mz->addr;
4580         mem->pa = mz->iova;
4581         mem->zone = (const void *)mz;
4582         PMD_DRV_LOG(DEBUG,
4583                 "memzone %s allocated with physical address: %"PRIu64,
4584                 mz->name, mem->pa);
4585
4586         return I40E_SUCCESS;
4587 }
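
/*
 * RTE_MEMZONE_IOVA_CONTIG requests an IOVA-contiguous zone, which the
 * hardware needs for DMA rings; the random name is simply a cheap way
 * to avoid memzone name collisions between allocations.
 */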
4588
4589 /**
4590  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4591  * @hw:   pointer to the HW structure
4592  * @mem:  ptr to mem struct to free
4593  **/
4594 enum i40e_status_code
4595 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4596                     struct i40e_dma_mem *mem)
4597 {
4598         if (!mem)
4599                 return I40E_ERR_PARAM;
4600
4601         PMD_DRV_LOG(DEBUG,
4602                 "memzone %s to be freed with physical address: %"PRIu64,
4603                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4604         rte_memzone_free((const struct rte_memzone *)mem->zone);
4605         mem->zone = NULL;
4606         mem->va = NULL;
4607         mem->pa = (u64)0;
4608
4609         return I40E_SUCCESS;
4610 }
4611
4612 /**
4613  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4614  * @hw:   pointer to the HW structure
4615  * @mem:  pointer to mem struct to fill out
4616  * @size: size of memory requested
4617  **/
4618 enum i40e_status_code
4619 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4620                          struct i40e_virt_mem *mem,
4621                          u32 size)
4622 {
4623         if (!mem)
4624                 return I40E_ERR_PARAM;
4625
4626         mem->size = size;
4627         mem->va = rte_zmalloc("i40e", size, 0);
4628
4629         if (mem->va)
4630                 return I40E_SUCCESS;
4631         else
4632                 return I40E_ERR_NO_MEMORY;
4633 }
4634
4635 /**
4636  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4637  * @hw:   pointer to the HW structure
4638  * @mem:  pointer to mem struct to free
4639  **/
4640 enum i40e_status_code
4641 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4642                      struct i40e_virt_mem *mem)
4643 {
4644         if (!mem)
4645                 return I40E_ERR_PARAM;
4646
4647         rte_free(mem->va);
4648         mem->va = NULL;
4649
4650         return I40E_SUCCESS;
4651 }
4652
4653 void
4654 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4655 {
4656         rte_spinlock_init(&sp->spinlock);
4657 }
4658
4659 void
4660 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4661 {
4662         rte_spinlock_lock(&sp->spinlock);
4663 }
4664
4665 void
4666 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4667 {
4668         rte_spinlock_unlock(&sp->spinlock);
4669 }
4670
4671 void
4672 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4673 {
4674         return;
4675 }
4676
4677 /**
4678  * Get the hardware capabilities, which will be parsed
4679  * and saved into struct i40e_hw.
4680  */
4681 static int
4682 i40e_get_cap(struct i40e_hw *hw)
4683 {
4684         struct i40e_aqc_list_capabilities_element_resp *buf;
4685         uint16_t len, size = 0;
4686         int ret;
4687
4688         /* Allocate a buffer large enough to hold the response data temporarily */
4689         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4690                                                 I40E_MAX_CAP_ELE_NUM;
4691         buf = rte_zmalloc("i40e", len, 0);
4692         if (!buf) {
4693                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4694                 return I40E_ERR_NO_MEMORY;
4695         }
4696
4697         /* Get and parse the capabilities, then save them to hw */
4698         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4699                         i40e_aqc_opc_list_func_capabilities, NULL);
4700         if (ret != I40E_SUCCESS)
4701                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4702
4703         /* Free the temporary buffer after use */
4704         rte_free(buf);
4705
4706         return ret;
4707 }
4708
4709 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4710
4711 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4712                 const char *value,
4713                 void *opaque)
4714 {
4715         struct i40e_pf *pf;
4716         unsigned long num;
4717         char *end;
4718
4719         pf = (struct i40e_pf *)opaque;
4720         RTE_SET_USED(key);
4721
4722         errno = 0;
4723         num = strtoul(value, &end, 0);
4724         if (errno != 0 || end == value || *end != 0) {
4725                 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
4726                             "kept the value = %hu", value, pf->vf_nb_qp_max);
4727                 return -(EINVAL);
4728         }
4729
4730         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4731                 pf->vf_nb_qp_max = (uint16_t)num;
4732         else
4733                 /* Return 0 here so the next valid same argument still works */
4734                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4735                             "a power of 2 and no greater than 16. Keeping "
4736                             "the current value = %hu", num, pf->vf_nb_qp_max);
4737
4738         return 0;
4739 }
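
/*
 * This handler backs the "queue-num-per-vf" devarg, passed with the PCI
 * device, e.g. (hypothetical address):
 *
 *     0000:02:00.0,queue-num-per-vf=8
 *
 * Accepted values are powers of 2 up to I40E_MAX_QP_NUM_PER_VF; any
 * other value leaves the default of 4 in place.
 */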
4740
4741 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4742 {
4743         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4744         struct rte_kvargs *kvlist;
4745         int kvargs_count;
4746
4747         /* set default queue number per VF as 4 */
4748         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4749
4750         if (dev->device->devargs == NULL)
4751                 return 0;
4752
4753         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4754         if (kvlist == NULL)
4755                 return -(EINVAL);
4756
4757         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4758         if (!kvargs_count) {
4759                 rte_kvargs_free(kvlist);
4760                 return 0;
4761         }
4762
4763         if (kvargs_count > 1)
4764                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4765                             "the first invalid or last valid one is used !",
4766                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4767
4768         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4769                            i40e_pf_parse_vf_queue_number_handler, pf);
4770
4771         rte_kvargs_free(kvlist);
4772
4773         return 0;
4774 }
4775
4776 static int
4777 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4778 {
4779         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4780         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4781         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4782         uint16_t qp_count = 0, vsi_count = 0;
4783
4784         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4785                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4786                 return -EINVAL;
4787         }
4788
4789         i40e_pf_config_vf_rxq_number(dev);
4790
4791         /* Add the parameter init for LFC */
4792         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4793         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4794         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4795
4796         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4797         pf->max_num_vsi = hw->func_caps.num_vsis;
4798         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4799         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4800
4801         /* FDir queue/VSI allocation */
4802         pf->fdir_qp_offset = 0;
4803         if (hw->func_caps.fd) {
4804                 pf->flags |= I40E_FLAG_FDIR;
4805                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4806         } else {
4807                 pf->fdir_nb_qps = 0;
4808         }
4809         qp_count += pf->fdir_nb_qps;
4810         vsi_count += 1;
4811
4812         /* LAN queue/VSI allocation */
4813         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4814         if (!hw->func_caps.rss) {
4815                 pf->lan_nb_qps = 1;
4816         } else {
4817                 pf->flags |= I40E_FLAG_RSS;
4818                 if (hw->mac.type == I40E_MAC_X722)
4819                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4820                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4821         }
4822         qp_count += pf->lan_nb_qps;
4823         vsi_count += 1;
4824
4825         /* VF queue/VSI allocation */
4826         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4827         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4828                 pf->flags |= I40E_FLAG_SRIOV;
4829                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4830                 pf->vf_num = pci_dev->max_vfs;
4831                 PMD_DRV_LOG(DEBUG,
4832                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4833                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4834         } else {
4835                 pf->vf_nb_qps = 0;
4836                 pf->vf_num = 0;
4837         }
4838         qp_count += pf->vf_nb_qps * pf->vf_num;
4839         vsi_count += pf->vf_num;
4840
4841         /* VMDq queue/VSI allocation */
4842         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4843         pf->vmdq_nb_qps = 0;
4844         pf->max_nb_vmdq_vsi = 0;
4845         if (hw->func_caps.vmdq) {
4846                 if (qp_count < hw->func_caps.num_tx_qp &&
4847                         vsi_count < hw->func_caps.num_vsis) {
4848                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4849                                 qp_count) / pf->vmdq_nb_qp_max;
4850
4851                         /* Limit the maximum number of VMDq vsi to the maximum
4852                          * ethdev can support
4853                          */
4854                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4855                                 hw->func_caps.num_vsis - vsi_count);
4856                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4857                                 ETH_64_POOLS);
4858                         if (pf->max_nb_vmdq_vsi) {
4859                                 pf->flags |= I40E_FLAG_VMDQ;
4860                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4861                                 PMD_DRV_LOG(DEBUG,
4862                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4863                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4864                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4865                         } else {
4866                                 PMD_DRV_LOG(INFO,
4867                                         "No enough queues left for VMDq");
4868                         }
4869                 } else {
4870                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4871                 }
4872         }
4873         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4874         vsi_count += pf->max_nb_vmdq_vsi;
4875
4876         if (hw->func_caps.dcb)
4877                 pf->flags |= I40E_FLAG_DCB;
4878
4879         if (qp_count > hw->func_caps.num_tx_qp) {
4880                 PMD_DRV_LOG(ERR,
4881                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4882                         qp_count, hw->func_caps.num_tx_qp);
4883                 return -EINVAL;
4884         }
4885         if (vsi_count > hw->func_caps.num_vsis) {
4886                 PMD_DRV_LOG(ERR,
4887                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4888                         vsi_count, hw->func_caps.num_vsis);
4889                 return -EINVAL;
4890         }
4891
4892         return 0;
4893 }
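
/*
 * Illustrative accounting (hypothetical capabilities): with one FDIR
 * queue, 64 LAN queues, 4 VFs at 4 queues each and no VMDq:
 *
 *     qp_count  = 1 + 64 + 4 * 4              = 81 queue pairs
 *     vsi_count = 1 (FDIR) + 1 (LAN) + 4 (VF) = 6 VSIs
 *
 * and both totals must fit within the hw->func_caps limits checked above.
 */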
4894
4895 static int
4896 i40e_pf_get_switch_config(struct i40e_pf *pf)
4897 {
4898         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4899         struct i40e_aqc_get_switch_config_resp *switch_config;
4900         struct i40e_aqc_switch_config_element_resp *element;
4901         uint16_t start_seid = 0, num_reported;
4902         int ret;
4903
4904         switch_config = (struct i40e_aqc_get_switch_config_resp *)
4905                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4906         if (!switch_config) {
4907                 PMD_DRV_LOG(ERR, "Failed to allocated memory");
4908                 return -ENOMEM;
4909         }
4910
4911         /* Get the switch configurations */
4912         ret = i40e_aq_get_switch_config(hw, switch_config,
4913                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4914         if (ret != I40E_SUCCESS) {
4915                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4916                 goto fail;
4917         }
4918         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4919         if (num_reported != 1) { /* The number should be 1 */
4920                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4921                 goto fail;
4922         }
4923
4924         /* Parse the switch configuration elements */
4925         element = &(switch_config->element[0]);
4926         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4927                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4928                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4929         } else
4930                 PMD_DRV_LOG(INFO, "Unknown element type");
4931
4932 fail:
4933         rte_free(switch_config);
4934
4935         return ret;
4936 }
4937
4938 static int
4939 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4940                         uint32_t num)
4941 {
4942         struct pool_entry *entry;
4943
4944         if (pool == NULL || num == 0)
4945                 return -EINVAL;
4946
4947         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4948         if (entry == NULL) {
4949                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4950                 return -ENOMEM;
4951         }
4952
4953         /* Initialize the queue heap */
4954         pool->num_free = num;
4955         pool->num_alloc = 0;
4956         pool->base = base;
4957         LIST_INIT(&pool->alloc_list);
4958         LIST_INIT(&pool->free_list);
4959
4960         /* Initialize the element */
4961         entry->base = 0;
4962         entry->len = num;
4963
4964         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4965         return 0;
4966 }
4967
4968 static void
4969 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4970 {
4971         struct pool_entry *entry, *next_entry;
4972
4973         if (pool == NULL)
4974                 return;
4975
4976         for (entry = LIST_FIRST(&pool->alloc_list);
4977                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4978                         entry = next_entry) {
4979                 LIST_REMOVE(entry, next);
4980                 rte_free(entry);
4981         }
4982
4983         for (entry = LIST_FIRST(&pool->free_list);
4984                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4985                         entry = next_entry) {
4986                 LIST_REMOVE(entry, next);
4987                 rte_free(entry);
4988         }
4989
4990         pool->num_free = 0;
4991         pool->num_alloc = 0;
4992         pool->base = 0;
4993         LIST_INIT(&pool->alloc_list);
4994         LIST_INIT(&pool->free_list);
4995 }
4996
4997 static int
4998 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4999                        uint32_t base)
5000 {
5001         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
5002         uint32_t pool_offset;
5003         uint16_t len;
5004         int insert;
5005
5006         if (pool == NULL) {
5007                 PMD_DRV_LOG(ERR, "Invalid parameter");
5008                 return -EINVAL;
5009         }
5010
5011         pool_offset = base - pool->base;
5012         /* Lookup in alloc list */
5013         LIST_FOREACH(entry, &pool->alloc_list, next) {
5014                 if (entry->base == pool_offset) {
5015                         valid_entry = entry;
5016                         LIST_REMOVE(entry, next);
5017                         break;
5018                 }
5019         }
5020
5021         /* Not found, return */
5022         if (valid_entry == NULL) {
5023                 PMD_DRV_LOG(ERR, "Failed to find entry");
5024                 return -EINVAL;
5025         }
5026
5027         /**
5028          * Found it; move it to the free list and try to merge.
5029          * To make merging easier, the free list is always sorted by base.
5030          * Find the adjacent prev and next entries.
5031          */
5032         prev = next = NULL;
5033         LIST_FOREACH(entry, &pool->free_list, next) {
5034                 if (entry->base > valid_entry->base) {
5035                         next = entry;
5036                         break;
5037                 }
5038                 prev = entry;
5039         }
5040
5041         insert = 0;
5042         len = valid_entry->len;
5043         /* Try to merge with the next one */
5044         if (next != NULL) {
5045                 /* Merge with next one */
5046                 if (valid_entry->base + len == next->base) {
5047                         next->base = valid_entry->base;
5048                         next->len += len;
5049                         rte_free(valid_entry);
5050                         valid_entry = next;
5051                         insert = 1;
5052                 }
5053         }
5054
5055         if (prev != NULL) {
5056                 /* Merge with previous one */
5057                 if (prev->base + prev->len == valid_entry->base) {
5058                         prev->len += valid_entry->len;
5059                         /* If it merged with the next one, remove the next node */
5060                         if (insert == 1) {
5061                                 LIST_REMOVE(valid_entry, next);
5062                                 rte_free(valid_entry);
5063                                 valid_entry = NULL;
5064                         } else {
5065                                 rte_free(valid_entry);
5066                                 valid_entry = NULL;
5067                                 insert = 1;
5068                         }
5069                 }
5070         }
5071
5072         /* No entry found to merge with, so insert it */
5073         if (insert == 0) {
5074                 if (prev != NULL)
5075                         LIST_INSERT_AFTER(prev, valid_entry, next);
5076                 else if (next != NULL)
5077                         LIST_INSERT_BEFORE(next, valid_entry, next);
5078                 else /* It's empty list, insert to head */
5079                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5080         }
5081
5082         pool->num_free += len;
5083         pool->num_alloc -= len;
5084
5085         return 0;
5086 }
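
/*
 * Merge example (hypothetical offsets): with free ranges [0,8) and
 * [12,16), freeing the allocated range [8,12) first coalesces forward
 * into [8,16), then backward with [0,8) into a single free range [0,16).
 */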
5087
5088 static int
5089 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5090                        uint16_t num)
5091 {
5092         struct pool_entry *entry, *valid_entry;
5093
5094         if (pool == NULL || num == 0) {
5095                 PMD_DRV_LOG(ERR, "Invalid parameter");
5096                 return -EINVAL;
5097         }
5098
5099         if (pool->num_free < num) {
5100                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5101                             num, pool->num_free);
5102                 return -ENOMEM;
5103         }
5104
5105         valid_entry = NULL;
5106         /* Look up the free list and find the best-fit entry */
5107         LIST_FOREACH(entry, &pool->free_list, next) {
5108                 if (entry->len >= num) {
5109                         /* Find best one */
5110                         if (entry->len == num) {
5111                                 valid_entry = entry;
5112                                 break;
5113                         }
5114                         if (valid_entry == NULL || valid_entry->len > entry->len)
5115                                 valid_entry = entry;
5116                 }
5117         }
5118
5119         /* No entry found that satisfies the request, return */
5120         if (valid_entry == NULL) {
5121                 PMD_DRV_LOG(ERR, "No valid entry found");
5122                 return -ENOMEM;
5123         }
5124         /**
5125          * The entry has exactly the requested number of queues;
5126          * remove it from the free_list.
5127          */
5128         if (valid_entry->len == num) {
5129                 LIST_REMOVE(valid_entry, next);
5130         } else {
5131                 /**
5132                  * The entry has more queues than requested;
5133                  * create a new entry for the alloc_list, then advance the
5134                  * base and shrink the length of the remaining free_list entry.
5135                  */
5136                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5137                 if (entry == NULL) {
5138                         PMD_DRV_LOG(ERR,
5139                                 "Failed to allocate memory for resource pool");
5140                         return -ENOMEM;
5141                 }
5142                 entry->base = valid_entry->base;
5143                 entry->len = num;
5144                 valid_entry->base += num;
5145                 valid_entry->len -= num;
5146                 valid_entry = entry;
5147         }
5148
5149         /* Insert it into alloc list, not sorted */
5150         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5151
5152         pool->num_free -= valid_entry->len;
5153         pool->num_alloc += valid_entry->len;
5154
5155         return valid_entry->base + pool->base;
5156 }
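
/*
 * Illustrative usage sketch (not part of the driver): a typical alloc/free
 * pairing on the PF queue-pair pool. The pool chosen and the request size
 * are assumptions for demonstration only.
 *
 *     int base = i40e_res_pool_alloc(&pf->qp_pool, 4);
 *     if (base >= 0) {
 *             (use queues base .. base + 3)
 *             i40e_res_pool_free(&pf->qp_pool, base);
 *     }
 *
 * On success the return value is the absolute base (entry base plus pool
 * base); a negative value is an error code.
 */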
5157
5158 /**
5159  * bitmap_is_subset - Check whether src2 is a subset of src1
5160  **/
5161 static inline int
5162 bitmap_is_subset(uint8_t src1, uint8_t src2)
5163 {
5164         return !((src1 ^ src2) & src2);
5165 }
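
/*
 * How the check works: (src1 ^ src2) keeps the bits where the two maps
 * differ; masking with src2 keeps only the differing bits that are set in
 * src2, i.e. bits present in src2 but missing from src1. Worked examples:
 *
 *     bitmap_is_subset(0x7, 0x3) == 1    (0x3 lies within 0x7)
 *     bitmap_is_subset(0x3, 0x5) == 0    (bit 2 of 0x5 is not in 0x3)
 */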
5166
5167 static enum i40e_status_code
5168 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5169 {
5170         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5171
5172         /* If DCB is not supported, only default TC is supported */
5173         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5174                 PMD_DRV_LOG(ERR, "DCB is not supported, only TC0 is supported");
5175                 return I40E_NOT_SUPPORTED;
5176         }
5177
5178         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5179                 PMD_DRV_LOG(ERR,
5180                         "Requested TC map 0x%x is not a subset of the HW-supported TC map 0x%x",
5181                         enabled_tcmap, hw->func_caps.enabled_tcmap);
5182                 return I40E_NOT_SUPPORTED;
5183         }
5184         return I40E_SUCCESS;
5185 }
5186
5187 int
5188 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5189                                 struct i40e_vsi_vlan_pvid_info *info)
5190 {
5191         struct i40e_hw *hw;
5192         struct i40e_vsi_context ctxt;
5193         uint8_t vlan_flags = 0;
5194         int ret;
5195
5196         if (vsi == NULL || info == NULL) {
5197                 PMD_DRV_LOG(ERR, "invalid parameters");
5198                 return I40E_ERR_PARAM;
5199         }
5200
5201         if (info->on) {
5202                 vsi->info.pvid = info->config.pvid;
5203                 /**
5204                  * If insert pvid is enabled, only tagged pkts are
5205                  * allowed to be sent out.
5206                  */
5207                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5208                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5209         } else {
5210                 vsi->info.pvid = 0;
5211                 if (info->config.reject.tagged == 0)
5212                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5213
5214                 if (info->config.reject.untagged == 0)
5215                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5216         }
5217         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5218                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5219         vsi->info.port_vlan_flags |= vlan_flags;
5220         vsi->info.valid_sections =
5221                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5222         memset(&ctxt, 0, sizeof(ctxt));
5223         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5224         ctxt.seid = vsi->seid;
5225
5226         hw = I40E_VSI_TO_HW(vsi);
5227         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5228         if (ret != I40E_SUCCESS)
5229                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5230
5231         return ret;
5232 }
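
/*
 * Illustrative usage sketch (values are assumptions): enabling PVID 100 on
 * a VSI. The field names follow the struct usage above.
 *
 *     struct i40e_vsi_vlan_pvid_info info;
 *
 *     memset(&info, 0, sizeof(info));
 *     info.on = 1;
 *     info.config.pvid = 100;
 *     if (i40e_vsi_vlan_pvid_set(vsi, &info) != I40E_SUCCESS)
 *             (handle the failure)
 */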
5233
5234 static int
5235 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5236 {
5237         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5238         int i, ret;
5239         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5240
5241         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5242         if (ret != I40E_SUCCESS)
5243                 return ret;
5244
5245         if (!vsi->seid) {
5246                 PMD_DRV_LOG(ERR, "seid not valid");
5247                 return -EINVAL;
5248         }
5249
5250         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5251         tc_bw_data.tc_valid_bits = enabled_tcmap;
5252         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5253                 tc_bw_data.tc_bw_credits[i] =
5254                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5255
5256         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5257         if (ret != I40E_SUCCESS) {
5258                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5259                 return ret;
5260         }
5261
5262         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5263                                         sizeof(vsi->info.qs_handle));
5264         return I40E_SUCCESS;
5265 }
5266
5267 static enum i40e_status_code
5268 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5269                                  struct i40e_aqc_vsi_properties_data *info,
5270                                  uint8_t enabled_tcmap)
5271 {
5272         enum i40e_status_code ret;
5273         int i, total_tc = 0;
5274         uint16_t qpnum_per_tc, bsf, qp_idx;
5275
5276         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5277         if (ret != I40E_SUCCESS)
5278                 return ret;
5279
5280         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5281                 if (enabled_tcmap & (1 << i))
5282                         total_tc++;
5283         if (total_tc == 0)
5284                 total_tc = 1;
5285         vsi->enabled_tc = enabled_tcmap;
5286
5287         /* Number of queues per enabled TC */
5288         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5289         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5290         bsf = rte_bsf32(qpnum_per_tc);
5291
5292         /* Adjust the queue number to actual queues that can be applied */
5293         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5294                 vsi->nb_qps = qpnum_per_tc * total_tc;
5295
5296         /**
5297          * Configure TC and queue mapping parameters. Each enabled TC
5298          * is allocated qpnum_per_tc queues; disabled TCs are served
5299          * by the default queue.
5300          */
5301         qp_idx = 0;
5302         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5303                 if (vsi->enabled_tc & (1 << i)) {
5304                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5305                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5306                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5307                         qp_idx += qpnum_per_tc;
5308                 } else
5309                         info->tc_mapping[i] = 0;
5310         }
5311
5312         /* Associate queue number with VSI */
5313         if (vsi->type == I40E_VSI_SRIOV) {
5314                 info->mapping_flags |=
5315                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5316                 for (i = 0; i < vsi->nb_qps; i++)
5317                         info->queue_mapping[i] =
5318                                 rte_cpu_to_le_16(vsi->base_queue + i);
5319         } else {
5320                 info->mapping_flags |=
5321                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5322                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5323         }
5324         info->valid_sections |=
5325                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5326
5327         return I40E_SUCCESS;
5328 }
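
/*
 * Worked example of the tc_mapping encoding above (illustrative values,
 * assuming I40E_MAX_Q_PER_TC does not cap the result): with nb_qps = 16 and
 * only TC0 enabled, total_tc = 1, qpnum_per_tc = 16 and
 * bsf = rte_bsf32(16) = 4, so
 *
 *     tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                     (4 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. TC0 starts at queue offset 0 and owns 2^4 = 16 queues; the other
 * tc_mapping entries stay 0.
 */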
5329
5330 static int
5331 i40e_veb_release(struct i40e_veb *veb)
5332 {
5333         struct i40e_vsi *vsi;
5334         struct i40e_hw *hw;
5335
5336         if (veb == NULL)
5337                 return -EINVAL;
5338
5339         if (!TAILQ_EMPTY(&veb->head)) {
5340                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5341                 return -EACCES;
5342         }
5343         /* associate_vsi field is NULL for floating VEB */
5344         if (veb->associate_vsi != NULL) {
5345                 vsi = veb->associate_vsi;
5346                 hw = I40E_VSI_TO_HW(vsi);
5347
5348                 vsi->uplink_seid = veb->uplink_seid;
5349                 vsi->veb = NULL;
5350         } else {
5351                 veb->associate_pf->main_vsi->floating_veb = NULL;
5352                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5353         }
5354
5355         i40e_aq_delete_element(hw, veb->seid, NULL);
5356         rte_free(veb);
5357         return I40E_SUCCESS;
5358 }
5359
5360 /* Setup a veb */
5361 static struct i40e_veb *
5362 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5363 {
5364         struct i40e_veb *veb;
5365         int ret;
5366         struct i40e_hw *hw;
5367
5368         if (pf == NULL) {
5369                 PMD_DRV_LOG(ERR,
5370                             "veb setup failed, associated PF shouldn't be NULL");
5371                 return NULL;
5372         }
5373         hw = I40E_PF_TO_HW(pf);
5374
5375         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5376         if (!veb) {
5377                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5378                 goto fail;
5379         }
5380
5381         veb->associate_vsi = vsi;
5382         veb->associate_pf = pf;
5383         TAILQ_INIT(&veb->head);
5384         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5385
5386         /* create floating veb if vsi is NULL */
5387         if (vsi != NULL) {
5388                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5389                                       I40E_DEFAULT_TCMAP, false,
5390                                       &veb->seid, false, NULL);
5391         } else {
5392                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5393                                       true, &veb->seid, false, NULL);
5394         }
5395
5396         if (ret != I40E_SUCCESS) {
5397                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5398                             hw->aq.asq_last_status);
5399                 goto fail;
5400         }
5401         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5402
5403         /* get statistics index */
5404         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5405                                 &veb->stats_idx, NULL, NULL, NULL);
5406         if (ret != I40E_SUCCESS) {
5407                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5408                             hw->aq.asq_last_status);
5409                 goto fail;
5410         }
5411         /* Get VEB bandwidth, to be implemented */
5412         /* The associated VSI now binds to the VEB; set its uplink to this VEB */
5413         if (vsi)
5414                 vsi->uplink_seid = veb->seid;
5415
5416         return veb;
5417 fail:
5418         rte_free(veb);
5419         return NULL;
5420 }
5421
5422 int
5423 i40e_vsi_release(struct i40e_vsi *vsi)
5424 {
5425         struct i40e_pf *pf;
5426         struct i40e_hw *hw;
5427         struct i40e_vsi_list *vsi_list;
5428         void *temp;
5429         int ret;
5430         struct i40e_mac_filter *f;
5431         uint16_t user_param;
5432
5433         if (!vsi)
5434                 return I40E_SUCCESS;
5435
5436         if (!vsi->adapter)
5437                 return -EFAULT;
5438
5439         user_param = vsi->user_param;
5440
5441         pf = I40E_VSI_TO_PF(vsi);
5442         hw = I40E_VSI_TO_HW(vsi);
5443
5444         /* VSI has children attached; release the children first */
5445         if (vsi->veb) {
5446                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5447                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5448                                 return -1;
5449                 }
5450                 i40e_veb_release(vsi->veb);
5451         }
5452
5453         if (vsi->floating_veb) {
5454                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5455                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5456                                 return -1;
5457                 }
5458         }
5459
5460         /* Remove all macvlan filters of the VSI */
5461         i40e_vsi_remove_all_macvlan_filter(vsi);
5462         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5463                 rte_free(f);
5464
5465         if (vsi->type != I40E_VSI_MAIN &&
5466             ((vsi->type != I40E_VSI_SRIOV) ||
5467             !pf->floating_veb_list[user_param])) {
5468                 /* Remove vsi from parent's sibling list */
5469                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5470                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
5471                         return I40E_ERR_PARAM;
5472                 }
5473                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5474                                 &vsi->sib_vsi_list, list);
5475
5476                 /* Remove the switch element of the VSI */
5477                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5478                 if (ret != I40E_SUCCESS)
5479                         PMD_DRV_LOG(ERR, "Failed to delete element");
5480         }
5481
5482         if ((vsi->type == I40E_VSI_SRIOV) &&
5483             pf->floating_veb_list[user_param]) {
5484                 /* Remove vsi from parent's sibling list */
5485                 if (vsi->parent_vsi == NULL ||
5486                     vsi->parent_vsi->floating_veb == NULL) {
5487                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
5488                         return I40E_ERR_PARAM;
5489                 }
5490                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5491                              &vsi->sib_vsi_list, list);
5492
5493                 /* Remove the switch element of the VSI */
5494                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5495                 if (ret != I40E_SUCCESS)
5496                         PMD_DRV_LOG(ERR, "Failed to delete element");
5497         }
5498
5499         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5500
5501         if (vsi->type != I40E_VSI_SRIOV)
5502                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5503         rte_free(vsi);
5504
5505         return I40E_SUCCESS;
5506 }
5507
5508 static int
5509 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5510 {
5511         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5512         struct i40e_aqc_remove_macvlan_element_data def_filter;
5513         struct i40e_mac_filter_info filter;
5514         int ret;
5515
5516         if (vsi->type != I40E_VSI_MAIN)
5517                 return I40E_ERR_CONFIG;
5518         memset(&def_filter, 0, sizeof(def_filter));
5519         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5520                                         ETH_ADDR_LEN);
5521         def_filter.vlan_tag = 0;
5522         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5523                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5524         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5525         if (ret != I40E_SUCCESS) {
5526                 struct i40e_mac_filter *f;
5527                 struct rte_ether_addr *mac;
5528
5529                 PMD_DRV_LOG(DEBUG,
5530                             "Cannot remove the default macvlan filter");
5531                 /* Need to add the permanent MAC to the MAC list */
5532                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5533                 if (f == NULL) {
5534                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5535                         return I40E_ERR_NO_MEMORY;
5536                 }
5537                 mac = &f->mac_info.mac_addr;
5538                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5539                                 ETH_ADDR_LEN);
5540                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5541                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5542                 vsi->mac_num++;
5543
5544                 return ret;
5545         }
5546         rte_memcpy(&filter.mac_addr,
5547                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5548         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5549         return i40e_vsi_add_mac(vsi, &filter);
5550 }
5551
5552 /*
5553  * i40e_vsi_get_bw_config - Query VSI BW Information
5554  * @vsi: the VSI to be queried
5555  *
5556  * Returns 0 on success, negative value on failure
5557  */
5558 static enum i40e_status_code
5559 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5560 {
5561         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5562         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5563         struct i40e_hw *hw = &vsi->adapter->hw;
5564         i40e_status ret;
5565         int i;
5566         uint32_t bw_max;
5567
5568         memset(&bw_config, 0, sizeof(bw_config));
5569         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5570         if (ret != I40E_SUCCESS) {
5571                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5572                             hw->aq.asq_last_status);
5573                 return ret;
5574         }
5575
5576         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5577         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5578                                         &ets_sla_config, NULL);
5579         if (ret != I40E_SUCCESS) {
5580                 PMD_DRV_LOG(ERR,
5581                         "VSI failed to get TC bandwidth configuration %u",
5582                         hw->aq.asq_last_status);
5583                 return ret;
5584         }
5585
5586         /* store and print out BW info */
5587         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5588         vsi->bw_info.bw_max = bw_config.max_bw;
5589         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5590         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5591         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5592                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5593                      I40E_16_BIT_WIDTH);
5594         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5595                 vsi->bw_info.bw_ets_share_credits[i] =
5596                                 ets_sla_config.share_credits[i];
5597                 vsi->bw_info.bw_ets_credits[i] =
5598                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5599                 /* 4 bits per TC, 4th bit is reserved */
5600                 vsi->bw_info.bw_ets_max[i] =
5601                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5602                                   RTE_LEN2MASK(3, uint8_t));
5603                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5604                             vsi->bw_info.bw_ets_share_credits[i]);
5605                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5606                             vsi->bw_info.bw_ets_credits[i]);
5607                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5608                             vsi->bw_info.bw_ets_max[i]);
5609         }
5610
5611         return I40E_SUCCESS;
5612 }
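
/*
 * Worked example of the bw_max decoding above (value is illustrative):
 * tc_bw_max[] holds two little-endian 16-bit words combined into one 32-bit
 * value carrying 4 bits per TC. With bw_max = 0x00300004, TC0's max credits
 * are (bw_max >> 0) & 0x7 = 4 and TC5's are (bw_max >> 20) & 0x7 = 3.
 */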
5613
5614 /* i40e_enable_pf_lb
5615  * @pf: pointer to the pf structure
5616  *
5617  * allow loopback on pf
5618  */
5619 static inline void
5620 i40e_enable_pf_lb(struct i40e_pf *pf)
5621 {
5622         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5623         struct i40e_vsi_context ctxt;
5624         int ret;
5625
5626         /* Enabling loopback requires FW >= v5.0 (except on X722) */
5627         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5628                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5629                 return;
5630         }
5631
5632         memset(&ctxt, 0, sizeof(ctxt));
5633         ctxt.seid = pf->main_vsi_seid;
5634         ctxt.pf_num = hw->pf_id;
5635         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5636         if (ret) {
5637                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5638                             ret, hw->aq.asq_last_status);
5639                 return;
5640         }
5641         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5642         ctxt.info.valid_sections =
5643                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5644         ctxt.info.switch_id |=
5645                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5646
5647         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5648         if (ret)
5649                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5650                             hw->aq.asq_last_status);
5651 }
5652
5653 /* Setup a VSI */
5654 struct i40e_vsi *
5655 i40e_vsi_setup(struct i40e_pf *pf,
5656                enum i40e_vsi_type type,
5657                struct i40e_vsi *uplink_vsi,
5658                uint16_t user_param)
5659 {
5660         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5661         struct i40e_vsi *vsi;
5662         struct i40e_mac_filter_info filter;
5663         int ret;
5664         struct i40e_vsi_context ctxt;
5665         struct rte_ether_addr broadcast =
5666                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5667
5668         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5669             uplink_vsi == NULL) {
5670                 PMD_DRV_LOG(ERR,
5671                         "VSI setup failed, VSI link shouldn't be NULL");
5672                 return NULL;
5673         }
5674
5675         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5676                 PMD_DRV_LOG(ERR,
5677                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5678                 return NULL;
5679         }
5680
5681         /* Two situations:
5682          * 1. type is not MAIN and the uplink VSI is not NULL:
5683          *    if the uplink VSI has not set up a VEB yet, create one under its veb field.
5684          * 2. type is SRIOV and the uplink is NULL:
5685          *    if the floating VEB is NULL, create one under the floating_veb field.
5686          */
5687
5688         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5689             uplink_vsi->veb == NULL) {
5690                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5691
5692                 if (uplink_vsi->veb == NULL) {
5693                         PMD_DRV_LOG(ERR, "VEB setup failed");
5694                         return NULL;
5695                 }
5696                 /* Set ALLOWLOOPBACK on the PF when the VEB is created */
5697                 i40e_enable_pf_lb(pf);
5698         }
5699
5700         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5701             pf->main_vsi->floating_veb == NULL) {
5702                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5703
5704                 if (pf->main_vsi->floating_veb == NULL) {
5705                         PMD_DRV_LOG(ERR, "VEB setup failed");
5706                         return NULL;
5707                 }
5708         }
5709
5710         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5711         if (!vsi) {
5712                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5713                 return NULL;
5714         }
5715         TAILQ_INIT(&vsi->mac_list);
5716         vsi->type = type;
5717         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5718         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5719         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5720         vsi->user_param = user_param;
5721         vsi->vlan_anti_spoof_on = 0;
5722         vsi->vlan_filter_on = 0;
5723         /* Allocate queues */
5724         switch (vsi->type) {
5725         case I40E_VSI_MAIN  :
5726                 vsi->nb_qps = pf->lan_nb_qps;
5727                 break;
5728         case I40E_VSI_SRIOV :
5729                 vsi->nb_qps = pf->vf_nb_qps;
5730                 break;
5731         case I40E_VSI_VMDQ2:
5732                 vsi->nb_qps = pf->vmdq_nb_qps;
5733                 break;
5734         case I40E_VSI_FDIR:
5735                 vsi->nb_qps = pf->fdir_nb_qps;
5736                 break;
5737         default:
5738                 goto fail_mem;
5739         }
5740         /*
5741          * The filter status descriptor is reported on RX queue 0, while
5742          * the TX queue used for FDIR filter programming has no such
5743          * constraint and could be any queue.
5744          * To keep it simple, the FDIR VSI uses queue pair 0.
5745          * To make sure queue pair 0 is used, queue allocation must
5746          * be done before this function is called.
5747          */
5748         if (type != I40E_VSI_FDIR) {
5749                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5750                 if (ret < 0) {
5751                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5752                                     vsi->seid, ret);
5753                         goto fail_mem;
5754                 }
5755                 vsi->base_queue = ret;
5756         } else
5757                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5758
5759         /* VF has MSIX interrupt in VF range, don't allocate here */
5760         if (type == I40E_VSI_MAIN) {
5761                 if (pf->support_multi_driver) {
5762                         /* With multi-driver support, INT0 must be used instead
5763                          * of allocating from the MSIX pool. The MSIX pool starts
5764                          * from INT1, so it's OK to set msix_intr to 0 and nb_msix
5765                          * to 1 without calling i40e_res_pool_alloc.
5766                          */
5767                         vsi->msix_intr = 0;
5768                         vsi->nb_msix = 1;
5769                 } else {
5770                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5771                                                   RTE_MIN(vsi->nb_qps,
5772                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5773                         if (ret < 0) {
5774                                 PMD_DRV_LOG(ERR,
5775                                             "VSI MAIN %d get heap failed %d",
5776                                             vsi->seid, ret);
5777                                 goto fail_queue_alloc;
5778                         }
5779                         vsi->msix_intr = ret;
5780                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5781                                                RTE_MAX_RXTX_INTR_VEC_ID);
5782                 }
5783         } else if (type != I40E_VSI_SRIOV) {
5784                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5785                 if (ret < 0) {
5786                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5787                         if (type != I40E_VSI_FDIR)
5788                                 goto fail_queue_alloc;
5789                         vsi->msix_intr = 0;
5790                         vsi->nb_msix = 0;
5791                 } else {
5792                         vsi->msix_intr = ret;
5793                         vsi->nb_msix = 1;
5794                 }
5795         } else {
5796                 vsi->msix_intr = 0;
5797                 vsi->nb_msix = 0;
5798         }
5799
5800         /* Add VSI */
5801         if (type == I40E_VSI_MAIN) {
5802                 /* The main VSI is the default one; no need to add it */
5803                 vsi->uplink_seid = pf->mac_seid;
5804                 vsi->seid = pf->main_vsi_seid;
5805                 /* Bind queues with specific MSIX interrupt */
5806                 /**
5807                  * At least two interrupts are needed: one for the misc
5808                  * cause, enabled from the OS side, and another for binding
5809                  * queues to interrupts from the device side only.
5810                  */
5811
5812                 /* Get default VSI parameters from hardware */
5813                 memset(&ctxt, 0, sizeof(ctxt));
5814                 ctxt.seid = vsi->seid;
5815                 ctxt.pf_num = hw->pf_id;
5816                 ctxt.uplink_seid = vsi->uplink_seid;
5817                 ctxt.vf_num = 0;
5818                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5819                 if (ret != I40E_SUCCESS) {
5820                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5821                         goto fail_msix_alloc;
5822                 }
5823                 rte_memcpy(&vsi->info, &ctxt.info,
5824                         sizeof(struct i40e_aqc_vsi_properties_data));
5825                 vsi->vsi_id = ctxt.vsi_number;
5826                 vsi->info.valid_sections = 0;
5827
5828                 /* Configure tc, enabled TC0 only */
5829                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5830                         I40E_SUCCESS) {
5831                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5832                         goto fail_msix_alloc;
5833                 }
5834
5835                 /* TC, queue mapping */
5836                 memset(&ctxt, 0, sizeof(ctxt));
5837                 vsi->info.valid_sections |=
5838                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5839                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5840                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5841                 rte_memcpy(&ctxt.info, &vsi->info,
5842                         sizeof(struct i40e_aqc_vsi_properties_data));
5843                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5844                                                 I40E_DEFAULT_TCMAP);
5845                 if (ret != I40E_SUCCESS) {
5846                         PMD_DRV_LOG(ERR,
5847                                 "Failed to configure TC queue mapping");
5848                         goto fail_msix_alloc;
5849                 }
5850                 ctxt.seid = vsi->seid;
5851                 ctxt.pf_num = hw->pf_id;
5852                 ctxt.uplink_seid = vsi->uplink_seid;
5853                 ctxt.vf_num = 0;
5854
5855                 /* Update VSI parameters */
5856                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5857                 if (ret != I40E_SUCCESS) {
5858                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5859                         goto fail_msix_alloc;
5860                 }
5861
5862                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5863                                                 sizeof(vsi->info.tc_mapping));
5864                 rte_memcpy(&vsi->info.queue_mapping,
5865                                 &ctxt.info.queue_mapping,
5866                         sizeof(vsi->info.queue_mapping));
5867                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5868                 vsi->info.valid_sections = 0;
5869
5870                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5871                                 ETH_ADDR_LEN);
5872
5873                 /**
5874                  * Updating the default filter settings is necessary to
5875                  * prevent reception of tagged packets.
5876                  * Some old firmware configurations load a default macvlan
5877                  * filter which accepts both tagged and untagged packets;
5878                  * the update replaces it with a normal filter if needed.
5879                  * For NVM 4.2.2 or later the update is no longer needed:
5880                  * firmware with the correct configuration loads the
5881                  * expected default macvlan filter, which cannot be removed.
5882                  */
5883                 i40e_update_default_filter_setting(vsi);
5884                 i40e_config_qinq(hw, vsi);
5885         } else if (type == I40E_VSI_SRIOV) {
5886                 memset(&ctxt, 0, sizeof(ctxt));
5887                 /**
5888                  * For other VSIs, the uplink_seid equals the uplink VSI's
5889                  * uplink_seid since they share the same VEB.
5890                  */
5891                 if (uplink_vsi == NULL)
5892                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5893                 else
5894                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5895                 ctxt.pf_num = hw->pf_id;
5896                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5897                 ctxt.uplink_seid = vsi->uplink_seid;
5898                 ctxt.connection_type = 0x1;
5899                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5900
5901                 /* Use the VEB configuration if FW >= v5.0 */
5902                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5903                         /* Configure switch ID */
5904                         ctxt.info.valid_sections |=
5905                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5906                         ctxt.info.switch_id =
5907                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5908                 }
5909
5910                 /* Configure port/vlan */
5911                 ctxt.info.valid_sections |=
5912                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5913                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5914                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5915                                                 hw->func_caps.enabled_tcmap);
5916                 if (ret != I40E_SUCCESS) {
5917                         PMD_DRV_LOG(ERR,
5918                                 "Failed to configure TC queue mapping");
5919                         goto fail_msix_alloc;
5920                 }
5921
5922                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5923                 ctxt.info.valid_sections |=
5924                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5925                 /**
5926                  * Since the VSI is not created yet, only configure the
5927                  * parameters; the VSI will be added below.
5928                  */
5929
5930                 i40e_config_qinq(hw, vsi);
5931         } else if (type == I40E_VSI_VMDQ2) {
5932                 memset(&ctxt, 0, sizeof(ctxt));
5933                 /*
5934                  * For other VSIs, the uplink_seid equals the uplink VSI's
5935                  * uplink_seid since they share the same VEB.
5936                  */
5937                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5938                 ctxt.pf_num = hw->pf_id;
5939                 ctxt.vf_num = 0;
5940                 ctxt.uplink_seid = vsi->uplink_seid;
5941                 ctxt.connection_type = 0x1;
5942                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5943
5944                 ctxt.info.valid_sections |=
5945                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5946                 /* user_param carries flag to enable loop back */
5947                 if (user_param) {
5948                         ctxt.info.switch_id =
5949                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5950                         ctxt.info.switch_id |=
5951                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5952                 }
5953
5954                 /* Configure port/vlan */
5955                 ctxt.info.valid_sections |=
5956                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5957                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5958                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5959                                                 I40E_DEFAULT_TCMAP);
5960                 if (ret != I40E_SUCCESS) {
5961                         PMD_DRV_LOG(ERR,
5962                                 "Failed to configure TC queue mapping");
5963                         goto fail_msix_alloc;
5964                 }
5965                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5966                 ctxt.info.valid_sections |=
5967                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5968         } else if (type == I40E_VSI_FDIR) {
5969                 memset(&ctxt, 0, sizeof(ctxt));
5970                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5971                 ctxt.pf_num = hw->pf_id;
5972                 ctxt.vf_num = 0;
5973                 ctxt.uplink_seid = vsi->uplink_seid;
5974                 ctxt.connection_type = 0x1;     /* regular data port */
5975                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5976                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5977                                                 I40E_DEFAULT_TCMAP);
5978                 if (ret != I40E_SUCCESS) {
5979                         PMD_DRV_LOG(ERR,
5980                                 "Failed to configure TC queue mapping.");
5981                         goto fail_msix_alloc;
5982                 }
5983                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5984                 ctxt.info.valid_sections |=
5985                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5986         } else {
5987                 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
5988                 goto fail_msix_alloc;
5989         }
5990
5991         if (vsi->type != I40E_VSI_MAIN) {
5992                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5993                 if (ret != I40E_SUCCESS) {
5994                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5995                                     hw->aq.asq_last_status);
5996                         goto fail_msix_alloc;
5997                 }
5998                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5999                 vsi->info.valid_sections = 0;
6000                 vsi->seid = ctxt.seid;
6001                 vsi->vsi_id = ctxt.vsi_number;
6002                 vsi->sib_vsi_list.vsi = vsi;
6003                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
6004                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6005                                           &vsi->sib_vsi_list, list);
6006                 } else {
6007                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6008                                           &vsi->sib_vsi_list, list);
6009                 }
6010         }
6011
6012         /* MAC/VLAN configuration */
6013         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6014         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
6015
6016         ret = i40e_vsi_add_mac(vsi, &filter);
6017         if (ret != I40E_SUCCESS) {
6018                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6019                 goto fail_msix_alloc;
6020         }
6021
6022         /* Get VSI BW information */
6023         i40e_vsi_get_bw_config(vsi);
6024         return vsi;
6025 fail_msix_alloc:
6026         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
6027 fail_queue_alloc:
6028         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6029 fail_mem:
6030         rte_free(vsi);
6031         return NULL;
6032 }
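
/*
 * Illustrative usage sketch (not part of the driver): creating a VMDQ VSI
 * under the main VSI and releasing it again. user_param carries the
 * loopback flag in the VMDQ branch above; error handling is elided.
 *
 *     struct i40e_vsi *vsi;
 *
 *     vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 0);
 *     if (vsi != NULL)
 *             i40e_vsi_release(vsi);
 */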
6033
6034 /* Configure vlan filter on or off */
6035 int
6036 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6037 {
6038         int i, num;
6039         struct i40e_mac_filter *f;
6040         void *temp;
6041         struct i40e_mac_filter_info *mac_filter;
6042         enum rte_mac_filter_type desired_filter;
6043         int ret = I40E_SUCCESS;
6044
6045         if (on) {
6046                 /* Filter to match MAC and VLAN */
6047                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
6048         } else {
6049                 /* Filter to match only MAC */
6050                 desired_filter = RTE_MAC_PERFECT_MATCH;
6051         }
6052
6053         num = vsi->mac_num;
6054
6055         mac_filter = rte_zmalloc("mac_filter_info_data",
6056                                  num * sizeof(*mac_filter), 0);
6057         if (mac_filter == NULL) {
6058                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6059                 return I40E_ERR_NO_MEMORY;
6060         }
6061
6062         i = 0;
6063
6064         /* Remove all existing MAC filters */
6065         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6066                 mac_filter[i] = f->mac_info;
6067                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6068                 if (ret) {
6069                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6070                                     on ? "enable" : "disable");
6071                         goto DONE;
6072                 }
6073                 i++;
6074         }
6075
6076         /* Re-add the filters with the desired filter type */
6077         for (i = 0; i < num; i++) {
6078                 mac_filter[i].filter_type = desired_filter;
6079                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6080                 if (ret) {
6081                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6082                                     on ? "enable" : "disable");
6083                         goto DONE;
6084                 }
6085         }
6086
6087 DONE:
6088         rte_free(mac_filter);
6089         return ret;
6090 }
6091
6092 /* Configure vlan stripping on or off */
6093 int
6094 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6095 {
6096         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6097         struct i40e_vsi_context ctxt;
6098         uint8_t vlan_flags;
6099         int ret = I40E_SUCCESS;
6100
6101         /* Check if stripping is already on or off */
6102         if (vsi->info.valid_sections &
6103                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6104                 if (on) {
6105                         if ((vsi->info.port_vlan_flags &
6106                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6107                                 return 0; /* already on */
6108                 } else {
6109                         if ((vsi->info.port_vlan_flags &
6110                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6111                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6112                                 return 0; /* already off */
6113                 }
6114         }
6115
6116         if (on)
6117                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6118         else
6119                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6120         vsi->info.valid_sections =
6121                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6122         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6123         vsi->info.port_vlan_flags |= vlan_flags;
6124         ctxt.seid = vsi->seid;
6125         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6126         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6127         if (ret)
6128                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6129                             on ? "enable" : "disable");
6130
6131         return ret;
6132 }
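
/*
 * Illustrative usage sketch (assuming the DEV_RX_OFFLOAD_VLAN_STRIP flag
 * from rte_ethdev.h): deriving the on/off argument from the configured RX
 * offloads, as a VLAN offload handler would.
 *
 *     int on = !!(dev->data->dev_conf.rxmode.offloads &
 *                 DEV_RX_OFFLOAD_VLAN_STRIP);
 *
 *     i40e_vsi_config_vlan_stripping(vsi, on);
 */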
6133
6134 static int
6135 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6136 {
6137         struct rte_eth_dev_data *data = dev->data;
6138         int ret;
6139         int mask = 0;
6140
6141         /* Apply vlan offload setting */
6142         mask = ETH_VLAN_STRIP_MASK |
6143                ETH_QINQ_STRIP_MASK |
6144                ETH_VLAN_FILTER_MASK |
6145                ETH_VLAN_EXTEND_MASK;
6146         ret = i40e_vlan_offload_set(dev, mask);
6147         if (ret) {
6148                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6149                 return ret;
6150         }
6151
6152         /* Apply pvid setting */
6153         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6154                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6155         if (ret)
6156                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6157
6158         return ret;
6159 }
6160
6161 static int
6162 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6163 {
6164         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6165
6166         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6167 }
6168
6169 static int
6170 i40e_update_flow_control(struct i40e_hw *hw)
6171 {
6172 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6173         struct i40e_link_status link_status;
6174         uint32_t rxfc = 0, txfc = 0, reg;
6175         uint8_t an_info;
6176         int ret;
6177
6178         memset(&link_status, 0, sizeof(link_status));
6179         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6180         if (ret != I40E_SUCCESS) {
6181                 PMD_DRV_LOG(ERR, "Failed to get link status information");
6182                 goto write_reg; /* Disable flow control */
6183         }
6184
6185         an_info = hw->phy.link_info.an_info;
6186         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6187                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6188                 ret = I40E_ERR_NOT_READY;
6189                 goto write_reg; /* Disable flow control */
6190         }
6191         /**
6192          * If link auto negotiation is enabled, flow control needs to
6193          * be configured according to it
6194          */
6195         switch (an_info & I40E_LINK_PAUSE_RXTX) {
6196         case I40E_LINK_PAUSE_RXTX:
6197                 rxfc = 1;
6198                 txfc = 1;
6199                 hw->fc.current_mode = I40E_FC_FULL;
6200                 break;
6201         case I40E_AQ_LINK_PAUSE_RX:
6202                 rxfc = 1;
6203                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6204                 break;
6205         case I40E_AQ_LINK_PAUSE_TX:
6206                 txfc = 1;
6207                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6208                 break;
6209         default:
6210                 hw->fc.current_mode = I40E_FC_NONE;
6211                 break;
6212         }
6213
6214 write_reg:
6215         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6216                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6217         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6218         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6219         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6220         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6221
6222         return ret;
6223 }
6224
6225 /* PF setup */
6226 static int
6227 i40e_pf_setup(struct i40e_pf *pf)
6228 {
6229         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6230         struct i40e_filter_control_settings settings;
6231         struct i40e_vsi *vsi;
6232         int ret;
6233
6234         /* Clear all stats counters */
6235         pf->offset_loaded = FALSE;
6236         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6237         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6238         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6239         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6240
6241         ret = i40e_pf_get_switch_config(pf);
6242         if (ret != I40E_SUCCESS) {
6243                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6244                 return ret;
6245         }
6246
6247         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6248         if (ret)
6249                 PMD_INIT_LOG(WARNING,
6250                         "failed to allocate switch domain for device, err %d", ret);
6251
6252         if (pf->flags & I40E_FLAG_FDIR) {
6253                 /* Allocate queues first so that FDIR uses queue pair 0 */
6254                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6255                 if (ret != I40E_FDIR_QUEUE_ID) {
6256                         PMD_DRV_LOG(ERR,
6257                                 "queue allocation failed for FDIR: ret=%d",
6258                                 ret);
6259                         pf->flags &= ~I40E_FLAG_FDIR;
6260                 }
6261         }
6262         /* Main VSI setup */
6263         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6264         if (!vsi) {
6265                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6266                 return I40E_ERR_NOT_READY;
6267         }
6268         pf->main_vsi = vsi;
6269
6270         /* Configure filter control */
6271         memset(&settings, 0, sizeof(settings));
6272         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6273                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6274         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6275                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6276         else {
6277                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6278                         hw->func_caps.rss_table_size);
6279                 return I40E_ERR_PARAM;
6280         }
6281         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6282                 hw->func_caps.rss_table_size);
6283         pf->hash_lut_size = hw->func_caps.rss_table_size;
6284
6285         /* Enable ethtype and macvlan filters */
6286         settings.enable_ethtype = TRUE;
6287         settings.enable_macvlan = TRUE;
6288         ret = i40e_set_filter_control(hw, &settings);
6289         if (ret)
6290                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6291                                                                 ret);
6292
6293         /* Update flow control according to the auto negotiation */
6294         i40e_update_flow_control(hw);
6295
6296         return I40E_SUCCESS;
6297 }
6298
6299 int
6300 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6301 {
6302         uint32_t reg;
6303         uint16_t j;
6304
6305         /**
6306          * Set or clear the TX Queue Disable flags,
6307          * as required by hardware.
6308          */
6309         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6310         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6311
6312         /* Wait until the request is finished */
6313         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6314                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6315                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6316                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6317                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6318                                                         & 0x1))) {
6319                         break;
6320                 }
6321         }
6322         if (on) {
6323                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6324                         return I40E_SUCCESS; /* already on, skip next steps */
6325
6326                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6327                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6328         } else {
6329                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6330                         return I40E_SUCCESS; /* already off, skip next steps */
6331                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6332         }
6333         /* Write the register */
6334         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6335         /* Check the result */
6336         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6337                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6338                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6339                 if (on) {
6340                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6341                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6342                                 break;
6343                 } else {
6344                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6345                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6346                                 break;
6347                 }
6348         }
6349         /* Check for timeout */
6350         if (j >= I40E_CHK_Q_ENA_COUNT) {
6351                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6352                             (on ? "enable" : "disable"), q_idx);
6353                 return I40E_ERR_TIMEOUT;
6354         }
6355
6356         return I40E_SUCCESS;
6357 }
6358
6359 int
6360 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6361 {
6362         uint32_t reg;
6363         uint16_t j;
6364
6365         /* Wait until the request is finished */
6366         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6367                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6368                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6369                 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6370                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
6371                         break;
6372         }
6373
6374         if (on) {
6375                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6376                         return I40E_SUCCESS; /* Already on, skip next steps */
6377                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6378         } else {
6379                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6380                         return I40E_SUCCESS; /* Already off, skip next steps */
6381                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6382         }
6383
6384         /* Write the register */
6385         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6386         /* Check the result */
6387         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6388                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6389                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6390                 if (on) {
6391                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6392                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6393                                 break;
6394                 } else {
6395                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6396                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6397                                 break;
6398                 }
6399         }
6400
6401         /* Check for timeout */
6402         if (j >= I40E_CHK_Q_ENA_COUNT) {
6403                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6404                             (on ? "enable" : "disable"), q_idx);
6405                 return I40E_ERR_TIMEOUT;
6406         }
6407
6408         return I40E_SUCCESS;
6409 }
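
/*
 * Illustrative usage sketch (not part of the driver): enabling queue pair 0
 * on start and disabling it on stop; error handling is elided.
 *
 *     if (i40e_switch_tx_queue(hw, 0, TRUE) == I40E_SUCCESS &&
 *         i40e_switch_rx_queue(hw, 0, TRUE) == I40E_SUCCESS)
 *             (queue pair 0 is now running)
 *
 *     i40e_switch_rx_queue(hw, 0, FALSE);
 *     i40e_switch_tx_queue(hw, 0, FALSE);
 */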
6410
6411 /* Initialize VSI for TX */
6412 static int
6413 i40e_dev_tx_init(struct i40e_pf *pf)
6414 {
6415         struct rte_eth_dev_data *data = pf->dev_data;
6416         uint16_t i;
6417         uint32_t ret = I40E_SUCCESS;
6418         struct i40e_tx_queue *txq;
6419
6420         for (i = 0; i < data->nb_tx_queues; i++) {
6421                 txq = data->tx_queues[i];
6422                 if (!txq || !txq->q_set)
6423                         continue;
6424                 ret = i40e_tx_queue_init(txq);
6425                 if (ret != I40E_SUCCESS)
6426                         break;
6427         }
6428         if (ret == I40E_SUCCESS)
6429                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6430                                      ->eth_dev);
6431
6432         return ret;
6433 }
6434
6435 /* Initialize VSI for RX */
6436 static int
6437 i40e_dev_rx_init(struct i40e_pf *pf)
6438 {
6439         struct rte_eth_dev_data *data = pf->dev_data;
6440         int ret = I40E_SUCCESS;
6441         uint16_t i;
6442         struct i40e_rx_queue *rxq;
6443
6444         i40e_pf_config_rss(pf);
6445         for (i = 0; i < data->nb_rx_queues; i++) {
6446                 rxq = data->rx_queues[i];
6447                 if (!rxq || !rxq->q_set)
6448                         continue;
6449
6450                 ret = i40e_rx_queue_init(rxq);
6451                 if (ret != I40E_SUCCESS) {
6452                         PMD_DRV_LOG(ERR,
6453                                 "Failed to do RX queue initialization");
6454                         break;
6455                 }
6456         }
6457         if (ret == I40E_SUCCESS)
6458                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6459                                      ->eth_dev);
6460
6461         return ret;
6462 }
6463
6464 static int
6465 i40e_dev_rxtx_init(struct i40e_pf *pf)
6466 {
6467         int err;
6468
6469         err = i40e_dev_tx_init(pf);
6470         if (err) {
6471                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6472                 return err;
6473         }
6474         err = i40e_dev_rx_init(pf);
6475         if (err) {
6476                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6477                 return err;
6478         }
6479
6480         return err;
6481 }
6482
6483 static int
6484 i40e_vmdq_setup(struct rte_eth_dev *dev)
6485 {
6486         struct rte_eth_conf *conf = &dev->data->dev_conf;
6487         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6488         int i, err, conf_vsis, j, loop;
6489         struct i40e_vsi *vsi;
6490         struct i40e_vmdq_info *vmdq_info;
6491         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6492         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6493
6494         /*
6495          * Disable interrupt to avoid messages from VFs. Furthermore, this
6496          * avoids race conditions in VSI creation/destroy.
6497          */
6498         i40e_pf_disable_irq0(hw);
6499
6500         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6501                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
                i40e_pf_enable_irq0(hw);
6502                 return -ENOTSUP;
6503         }
6504
6505         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6506         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6507                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max supported: %u",
6508                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6509                         pf->max_nb_vmdq_vsi);
                i40e_pf_enable_irq0(hw);
6510                 return -ENOTSUP;
6511         }
6512
6513         if (pf->vmdq != NULL) {
6514                 PMD_INIT_LOG(INFO, "VMDQ already configured");
                i40e_pf_enable_irq0(hw);
6515                 return 0;
6516         }
6517
6518         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6519                                 sizeof(*vmdq_info) * conf_vsis, 0);
6520
6521         if (pf->vmdq == NULL) {
6522                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
                i40e_pf_enable_irq0(hw);
6523                 return -ENOMEM;
6524         }
6525
6526         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6527
6528         /* Create VMDQ VSI */
6529         for (i = 0; i < conf_vsis; i++) {
6530                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6531                                 vmdq_conf->enable_loop_back);
6532                 if (vsi == NULL) {
6533                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6534                         err = -1;
6535                         goto err_vsi_setup;
6536                 }
6537                 vmdq_info = &pf->vmdq[i];
6538                 vmdq_info->pf = pf;
6539                 vmdq_info->vsi = vsi;
6540         }
6541         pf->nb_cfg_vmdq_vsi = conf_vsis;
6542
6543         /* Configure VLAN membership of each pool */
6544         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6545         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6546                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6547                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6548                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6549                                         vmdq_conf->pool_map[i].vlan_id, j);
6550
6551                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6552                                                 vmdq_conf->pool_map[i].vlan_id);
6553                                 if (err) {
6554                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6555                                         err = -1;
6556                                         goto err_vsi_setup;
6557                                 }
6558                         }
6559                 }
6560         }
6561
6562         i40e_pf_enable_irq0(hw);
6563
6564         return 0;
6565
6566 err_vsi_setup:
6567         for (i = 0; i < conf_vsis; i++) {
6568                 if (pf->vmdq[i].vsi == NULL)
6569                         break;
6570                 i40e_vsi_release(pf->vmdq[i].vsi);
6571         }
6572
6573         rte_free(pf->vmdq);
6574         pf->vmdq = NULL;
6575         i40e_pf_enable_irq0(hw);
6576         return err;
6577 }
6578
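/**
 * Read a 32-bit statistics register and return the delta against the
 * snapshot taken when offsets were first loaded. The hardware counters
 * are free-running, so if the current value is below the snapshot the
 * counter is assumed to have wrapped exactly once.
 */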
6579 static void
6580 i40e_stat_update_32(struct i40e_hw *hw,
6581                    uint32_t reg,
6582                    bool offset_loaded,
6583                    uint64_t *offset,
6584                    uint64_t *stat)
6585 {
6586         uint64_t new_data;
6587
6588         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6589         if (!offset_loaded)
6590                 *offset = new_data;
6591
6592         if (new_data >= *offset)
6593                 *stat = (uint64_t)(new_data - *offset);
6594         else
6595                 *stat = (uint64_t)((new_data +
6596                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6597 }
6598
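/**
 * Same as i40e_stat_update_32() but for the 48-bit counters, which the
 * hardware splits across two registers: loreg holds bits [31:0] and
 * hireg the upper bits (masked with I40E_16_BIT_MASK before being
 * shifted into place). Wraparound is handled at the 48-bit boundary and
 * the result is masked back to 48 bits.
 */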
6599 static void
6600 i40e_stat_update_48(struct i40e_hw *hw,
6601                    uint32_t hireg,
6602                    uint32_t loreg,
6603                    bool offset_loaded,
6604                    uint64_t *offset,
6605                    uint64_t *stat)
6606 {
6607         uint64_t new_data;
6608
6609         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6610         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6611                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6612
6613         if (!offset_loaded)
6614                 *offset = new_data;
6615
6616         if (new_data >= *offset)
6617                 *stat = new_data - *offset;
6618         else
6619                 *stat = (uint64_t)((new_data +
6620                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6621
6622         *stat &= I40E_48_BIT_MASK;
6623 }
6624
6625 /* Disable IRQ0 */
6626 void
6627 i40e_pf_disable_irq0(struct i40e_hw *hw)
6628 {
6629         /* Disable all interrupt types */
6630         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6631                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6632         I40E_WRITE_FLUSH(hw);
6633 }
6634
6635 /* Enable IRQ0 */
6636 void
6637 i40e_pf_enable_irq0(struct i40e_hw *hw)
6638 {
6639         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6640                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6641                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6642                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6643         I40E_WRITE_FLUSH(hw);
6644 }
6645
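/**
 * Configure the "other cause" interrupt (IRQ0): enable the PF ICR0
 * causes listed in I40E_PFINT_ICR0_ENA_MASK, select the ITR index used
 * for those causes via PFINT_STAT_CTL0, and, when no_queue is set,
 * write the end-of-list marker to PFINT_LNKLST0 so that no RX/TX
 * queues are chained to interrupt 0.
 */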
6646 static void
6647 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6648 {
6649         /* read pending request and disable first */
6650         i40e_pf_disable_irq0(hw);
6651         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6652         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6653                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6654
6655         if (no_queue)
6656                 /* Link no queues with irq0 */
6657                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6658                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6659 }
6660
6661 static void
6662 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6663 {
6664         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6665         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6666         int i;
6667         uint16_t abs_vf_id;
6668         uint32_t index, offset, val;
6669
6670         if (!pf->vfs)
6671                 return;
6672         /**
6673          * Try to find which VF triggered a reset; use the absolute VF id,
6674          * since the VFLR status register is a global register.
6675          */
6676         for (i = 0; i < pf->vf_num; i++) {
6677                 abs_vf_id = hw->func_caps.vf_base_id + i;
6678                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6679                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6680                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6681                 /* VFR event occurred */
6682                 if (val & (0x1 << offset)) {
6683                         int ret;
6684
6685                         /* Clear the event first */
6686                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6687                                                         (0x1 << offset));
6688                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6689                         /**
6690                          * Only notify the VF that a reset event occurred;
6691                          * don't trigger another SW reset.
6692                          */
6693                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6694                         if (ret != I40E_SUCCESS)
6695                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6696                 }
6697         }
6698 }
6699
6700 static void
6701 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6702 {
6703         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6704         int i;
6705
6706         for (i = 0; i < pf->vf_num; i++)
6707                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6708 }
6709
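/**
 * Drain the admin receive queue: i40e_clean_arq_element() returns one
 * event per call and reports via 'pending' how many more are queued,
 * so the loop keeps reading until the queue is empty or a read fails.
 */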
6710 static void
6711 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6712 {
6713         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6714         struct i40e_arq_event_info info;
6715         uint16_t pending, opcode;
6716         int ret;
6717
6718         info.buf_len = I40E_AQ_BUF_SZ;
6719         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6720         if (!info.msg_buf) {
6721                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6722                 return;
6723         }
6724
6725         pending = 1;
6726         while (pending) {
6727                 ret = i40e_clean_arq_element(hw, &info, &pending);
6728
6729                 if (ret != I40E_SUCCESS) {
6730                         PMD_DRV_LOG(INFO,
6731                                 "Failed to read msg from AdminQ, aq_err: %u",
6732                                 hw->aq.asq_last_status);
6733                         break;
6734                 }
6735                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6736
6737                 switch (opcode) {
6738                 case i40e_aqc_opc_send_msg_to_pf:
6739                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6740                         i40e_pf_host_handle_vf_msg(dev,
6741                                         rte_le_to_cpu_16(info.desc.retval),
6742                                         rte_le_to_cpu_32(info.desc.cookie_high),
6743                                         rte_le_to_cpu_32(info.desc.cookie_low),
6744                                         info.msg_buf,
6745                                         info.msg_len);
6746                         break;
6747                 case i40e_aqc_opc_get_link_status:
6748                         ret = i40e_dev_link_update(dev, 0);
6749                         if (!ret)
6750                                 rte_eth_dev_callback_process(dev,
6751                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6752                         break;
6753                 default:
6754                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6755                                     opcode);
6756                         break;
6757                 }
6758         }
6759         rte_free(info.msg_buf);
6760 }
6761
6762 static void
6763 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6764 {
6765 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6766 #define I40E_MDD_CLEAR16 0xFFFF
6767         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6768         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6769         bool mdd_detected = false;
6770         struct i40e_pf_vf *vf;
6771         uint32_t reg;
6772         int i;
6773
6774         /* find what triggered the MDD event */
6775         reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6776         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6777                 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6778                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6779                 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6780                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6781                 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6782                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6783                 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6784                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6785                                         hw->func_caps.base_queue;
6786                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6787                         "queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6788                                 event, queue, pf_num, vf_num, dev->data->name);
6789                 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6790                 mdd_detected = true;
6791         }
6792         reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6793         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6794                 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6795                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6796                 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6797                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6798                 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6799                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6800                                         hw->func_caps.base_queue;
6801
6802                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6803                                 "queue %d of function 0x%02x device %s\n",
6804                                         event, queue, func, dev->data->name);
6805                 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6806                 mdd_detected = true;
6807         }
6808
6809         if (mdd_detected) {
6810                 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6811                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6812                         I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6813                         PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6814                 }
6815                 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6816                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6817                         I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6818                                         I40E_MDD_CLEAR16);
6819                         PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6820                 }
6821         }
6822
6823         /* see if one of the VFs needs its hand slapped */
6824         for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6825                 vf = &pf->vfs[i];
6826                 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6827                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6828                         I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6829                                         I40E_MDD_CLEAR16);
6830                         vf->num_mdd_events++;
6831                         PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
6832                                         PRIu64 "times\n",
6833                                         i, vf->num_mdd_events);
6834                 }
6835
6836                 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6837                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6838                         I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6839                                         I40E_MDD_CLEAR16);
6840                         vf->num_mdd_events++;
6841                         PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
6842                                         PRIu64 "times\n",
6843                                         i, vf->num_mdd_events);
6844                 }
6845         }
6846 }
6847
6848 /**
6849  * Interrupt handler triggered by the NIC for handling
6850  * specific interrupts.
6851  *
6852  * @param param
6853  *  The address of the parameter (struct rte_eth_dev *) registered before.
6854  *
6855  * @return
6856  *  void
6857  */
6860 static void
6861 i40e_dev_interrupt_handler(void *param)
6862 {
6863         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6864         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6865         uint32_t icr0;
6866
6867         /* Disable interrupt */
6868         i40e_pf_disable_irq0(hw);
6869
6870         /* read out interrupt causes */
6871         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6872
6873         /* No interrupt event indicated */
6874         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6875                 PMD_DRV_LOG(INFO, "No interrupt event");
6876                 goto done;
6877         }
6878         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6879                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6880         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6881                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6882                 i40e_handle_mdd_event(dev);
6883         }
6884         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6885                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6886         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6887                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6888         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6889                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6890         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6891                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6892         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6893                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6894
6895         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6896                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6897                 i40e_dev_handle_vfr_event(dev);
6898         }
6899         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6900                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6901                 i40e_dev_handle_aq_msg(dev);
6902         }
6903
6904 done:
6905         /* Enable interrupt */
6906         i40e_pf_enable_irq0(hw);
6907 }
6908
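/**
 * Polling-mode counterpart of i40e_dev_interrupt_handler(): it services
 * the same ICR0 causes but is driven by an EAL alarm rather than a HW
 * interrupt, and re-arms itself every I40E_ALARM_INTERVAL microseconds.
 */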
6909 static void
6910 i40e_dev_alarm_handler(void *param)
6911 {
6912         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6913         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6914         uint32_t icr0;
6915
6916         /* Disable interrupt */
6917         i40e_pf_disable_irq0(hw);
6918
6919         /* read out interrupt causes */
6920         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6921
6922         /* No interrupt event indicated */
6923         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6924                 goto done;
6925         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6926                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6927         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6928                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6929                 i40e_handle_mdd_event(dev);
6930         }
6931         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6932                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6933         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6934                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6935         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6936                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6937         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6938                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6939         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6940                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6941
6942         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6943                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6944                 i40e_dev_handle_vfr_event(dev);
6945         }
6946         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6947                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6948                 i40e_dev_handle_aq_msg(dev);
6949         }
6950
6951 done:
6952         /* Enable interrupt */
6953         i40e_pf_enable_irq0(hw);
6954         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6955                           i40e_dev_alarm_handler, dev);
6956 }
6957
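/**
 * Program a list of MAC/VLAN filters in batches: one admin queue command
 * can carry at most asq_buf_size / sizeof(element) entries, so req_list
 * is refilled and resubmitted until all 'total' filters are added.
 */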
6958 int
6959 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6960                          struct i40e_macvlan_filter *filter,
6961                          int total)
6962 {
6963         int ele_num, ele_buff_size;
6964         int num, actual_num, i;
6965         uint16_t flags;
6966         int ret = I40E_SUCCESS;
6967         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6968         struct i40e_aqc_add_macvlan_element_data *req_list;
6969
6970         if (filter == NULL || total == 0)
6971                 return I40E_ERR_PARAM;
6972         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6973         ele_buff_size = hw->aq.asq_buf_size;
6974
6975         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6976         if (req_list == NULL) {
6977                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6978                 return I40E_ERR_NO_MEMORY;
6979         }
6980
6981         num = 0;
6982         do {
6983                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6984                 memset(req_list, 0, ele_buff_size);
6985
6986                 for (i = 0; i < actual_num; i++) {
6987                         rte_memcpy(req_list[i].mac_addr,
6988                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6989                         req_list[i].vlan_tag =
6990                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6991
6992                         switch (filter[num + i].filter_type) {
6993                         case RTE_MAC_PERFECT_MATCH:
6994                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6995                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6996                                 break;
6997                         case RTE_MACVLAN_PERFECT_MATCH:
6998                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6999                                 break;
7000                         case RTE_MAC_HASH_MATCH:
7001                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
7002                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7003                                 break;
7004                         case RTE_MACVLAN_HASH_MATCH:
7005                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7006                                 break;
7007                         default:
7008                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
7009                                 ret = I40E_ERR_PARAM;
7010                                 goto DONE;
7011                         }
7012
7013                         req_list[i].queue_number = 0;
7014
7015                         req_list[i].flags = rte_cpu_to_le_16(flags);
7016                 }
7017
7018                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7019                                                 actual_num, NULL);
7020                 if (ret != I40E_SUCCESS) {
7021                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7022                         goto DONE;
7023                 }
7024                 num += actual_num;
7025         } while (num < total);
7026
7027 DONE:
7028         rte_free(req_list);
7029         return ret;
7030 }
7031
7032 int
7033 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7034                             struct i40e_macvlan_filter *filter,
7035                             int total)
7036 {
7037         int ele_num, ele_buff_size;
7038         int num, actual_num, i;
7039         uint16_t flags;
7040         int ret = I40E_SUCCESS;
7041         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7042         struct i40e_aqc_remove_macvlan_element_data *req_list;
7043
7044         if (filter == NULL || total == 0)
7045                 return I40E_ERR_PARAM;
7046
7047         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7048         ele_buff_size = hw->aq.asq_buf_size;
7049
7050         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7051         if (req_list == NULL) {
7052                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
7053                 return I40E_ERR_NO_MEMORY;
7054         }
7055
7056         num = 0;
7057         do {
7058                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7059                 memset(req_list, 0, ele_buff_size);
7060
7061                 for (i = 0; i < actual_num; i++) {
7062                         rte_memcpy(req_list[i].mac_addr,
7063                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7064                         req_list[i].vlan_tag =
7065                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7066
7067                         switch (filter[num + i].filter_type) {
7068                         case RTE_MAC_PERFECT_MATCH:
7069                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7070                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7071                                 break;
7072                         case RTE_MACVLAN_PERFECT_MATCH:
7073                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7074                                 break;
7075                         case RTE_MAC_HASH_MATCH:
7076                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7077                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7078                                 break;
7079                         case RTE_MACVLAN_HASH_MATCH:
7080                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7081                                 break;
7082                         default:
7083                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7084                                 ret = I40E_ERR_PARAM;
7085                                 goto DONE;
7086                         }
7087                         req_list[i].flags = rte_cpu_to_le_16(flags);
7088                 }
7089
7090                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7091                                                 actual_num, NULL);
7092                 if (ret != I40E_SUCCESS) {
7093                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7094                         goto DONE;
7095                 }
7096                 num += actual_num;
7097         } while (num < total);
7098
7099 DONE:
7100         rte_free(req_list);
7101         return ret;
7102 }
7103
7104 /* Find a specific MAC filter */
7105 static struct i40e_mac_filter *
7106 i40e_find_mac_filter(struct i40e_vsi *vsi,
7107                          struct rte_ether_addr *macaddr)
7108 {
7109         struct i40e_mac_filter *f;
7110
7111         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7112                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7113                         return f;
7114         }
7115
7116         return NULL;
7117 }
7118
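/**
 * The software VLAN table (vsi->vfta) is a bitmap with one bit per VLAN
 * id: I40E_VFTA_IDX() selects the 32-bit word (vlan_id / 32) and
 * I40E_VFTA_BIT() the bit within it (1 << (vlan_id % 32)); e.g. VLAN 100
 * lives in bit 4 of word 3.
 */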
7119 static bool
7120 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7121                          uint16_t vlan_id)
7122 {
7123         uint32_t vid_idx, vid_bit;
7124
7125         if (vlan_id > ETH_VLAN_ID_MAX)
7126                 return 0;
7127
7128         vid_idx = I40E_VFTA_IDX(vlan_id);
7129         vid_bit = I40E_VFTA_BIT(vlan_id);
7130
7131         if (vsi->vfta[vid_idx] & vid_bit)
7132                 return 1;
7133         else
7134                 return 0;
7135 }
7136
7137 static void
7138 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7139                        uint16_t vlan_id, bool on)
7140 {
7141         uint32_t vid_idx, vid_bit;
7142
7143         vid_idx = I40E_VFTA_IDX(vlan_id);
7144         vid_bit = I40E_VFTA_BIT(vlan_id);
7145
7146         if (on)
7147                 vsi->vfta[vid_idx] |= vid_bit;
7148         else
7149                 vsi->vfta[vid_idx] &= ~vid_bit;
7150 }
7151
7152 void
7153 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7154                      uint16_t vlan_id, bool on)
7155 {
7156         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7157         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7158         int ret;
7159
7160         if (vlan_id > ETH_VLAN_ID_MAX)
7161                 return;
7162
7163         i40e_store_vlan_filter(vsi, vlan_id, on);
7164
7165         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7166                 return;
7167
7168         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7169
7170         if (on) {
7171                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7172                                        &vlan_data, 1, NULL);
7173                 if (ret != I40E_SUCCESS)
7174                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7175         } else {
7176                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7177                                           &vlan_data, 1, NULL);
7178                 if (ret != I40E_SUCCESS)
7179                         PMD_DRV_LOG(ERR,
7180                                     "Failed to remove vlan filter");
7181         }
7182 }
7183
7184 /**
7185  * Find all vlan options for a specific mac addr,
7186  * and return the vlans actually found.
7187  */
7188 int
7189 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7190                            struct i40e_macvlan_filter *mv_f,
7191                            int num, struct rte_ether_addr *addr)
7192 {
7193         int i;
7194         uint32_t j, k;
7195
7196         /**
7197          * Walk the vfta bitmap directly rather than calling
7198          * i40e_find_vlan_filter() per id, to reduce the loop time.
7199          */
7200         if (num < vsi->vlan_num)
7201                 return I40E_ERR_PARAM;
7202
7203         i = 0;
7204         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7205                 if (vsi->vfta[j]) {
7206                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7207                                 if (vsi->vfta[j] & (1 << k)) {
7208                                         if (i > num - 1) {
7209                                                 PMD_DRV_LOG(ERR,
7210                                                         "vlan number doesn't match");
7211                                                 return I40E_ERR_PARAM;
7212                                         }
7213                                         rte_memcpy(&mv_f[i].macaddr,
7214                                                         addr, ETH_ADDR_LEN);
7215                                         mv_f[i].vlan_id =
7216                                                 j * I40E_UINT32_BIT_SIZE + k;
7217                                         i++;
7218                                 }
7219                         }
7220                 }
7221         }
7222         return I40E_SUCCESS;
7223 }
7224
7225 static inline int
7226 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7227                            struct i40e_macvlan_filter *mv_f,
7228                            int num,
7229                            uint16_t vlan)
7230 {
7231         int i = 0;
7232         struct i40e_mac_filter *f;
7233
7234         if (num < vsi->mac_num)
7235                 return I40E_ERR_PARAM;
7236
7237         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7238                 if (i > num - 1) {
7239                         PMD_DRV_LOG(ERR, "buffer number not match");
7240                         return I40E_ERR_PARAM;
7241                 }
7242                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7243                                 ETH_ADDR_LEN);
7244                 mv_f[i].vlan_id = vlan;
7245                 mv_f[i].filter_type = f->mac_info.filter_type;
7246                 i++;
7247         }
7248
7249         return I40E_SUCCESS;
7250 }
7251
7252 static int
7253 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7254 {
7255         int i, j, num;
7256         struct i40e_mac_filter *f;
7257         struct i40e_macvlan_filter *mv_f;
7258         int ret = I40E_SUCCESS;
7259
7260         if (vsi == NULL || vsi->mac_num == 0)
7261                 return I40E_ERR_PARAM;
7262
7263         /* Case that no vlan is set */
7264         if (vsi->vlan_num == 0)
7265                 num = vsi->mac_num;
7266         else
7267                 num = vsi->mac_num * vsi->vlan_num;
7268
7269         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7270         if (mv_f == NULL) {
7271                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7272                 return I40E_ERR_NO_MEMORY;
7273         }
7274
7275         i = 0;
7276         if (vsi->vlan_num == 0) {
7277                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7278                         rte_memcpy(&mv_f[i].macaddr,
7279                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7280                         mv_f[i].filter_type = f->mac_info.filter_type;
7281                         mv_f[i].vlan_id = 0;
7282                         i++;
7283                 }
7284         } else {
7285                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7286                         ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
7287                                         vsi->vlan_num, &f->mac_info.mac_addr);
7288                         if (ret != I40E_SUCCESS)
7289                                 goto DONE;
7290                         for (j = i; j < i + vsi->vlan_num; j++)
7291                                 mv_f[j].filter_type = f->mac_info.filter_type;
7292                         i += vsi->vlan_num;
7293                 }
7294         }
7295
7296         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7297 DONE:
7298         rte_free(mv_f);
7299
7300         return ret;
7301 }
7302
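/**
 * Hardware filters are (MAC, VLAN) pairs, so adding a single VLAN means
 * programming one new filter per MAC address already on the VSI; the
 * mv_f scratch array holds that cross product before it is pushed to
 * the admin queue.
 */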
7303 int
7304 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7305 {
7306         struct i40e_macvlan_filter *mv_f;
7307         int mac_num;
7308         int ret = I40E_SUCCESS;
7309
7310         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7311                 return I40E_ERR_PARAM;
7312
7313         /* If it's already set, just return */
7314         if (i40e_find_vlan_filter(vsi, vlan))
7315                 return I40E_SUCCESS;
7316
7317         mac_num = vsi->mac_num;
7318
7319         if (mac_num == 0) {
7320                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7321                 return I40E_ERR_PARAM;
7322         }
7323
7324         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7325
7326         if (mv_f == NULL) {
7327                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7328                 return I40E_ERR_NO_MEMORY;
7329         }
7330
7331         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7332
7333         if (ret != I40E_SUCCESS)
7334                 goto DONE;
7335
7336         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7337
7338         if (ret != I40E_SUCCESS)
7339                 goto DONE;
7340
7341         i40e_set_vlan_filter(vsi, vlan, 1);
7342
7343         vsi->vlan_num++;
7344         ret = I40E_SUCCESS;
7345 DONE:
7346         rte_free(mv_f);
7347         return ret;
7348 }
7349
7350 int
7351 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7352 {
7353         struct i40e_macvlan_filter *mv_f;
7354         int mac_num;
7355         int ret = I40E_SUCCESS;
7356
7357         /**
7358          * VLAN 0 is the generic filter for untagged packets
7359          * and can't be removed.
7360          */
7361         if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7362                 return I40E_ERR_PARAM;
7363
7364         /* If it can't be found, just return */
7365         if (!i40e_find_vlan_filter(vsi, vlan))
7366                 return I40E_ERR_PARAM;
7367
7368         mac_num = vsi->mac_num;
7369
7370         if (mac_num == 0) {
7371                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7372                 return I40E_ERR_PARAM;
7373         }
7374
7375         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7376
7377         if (mv_f == NULL) {
7378                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7379                 return I40E_ERR_NO_MEMORY;
7380         }
7381
7382         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7383
7384         if (ret != I40E_SUCCESS)
7385                 goto DONE;
7386
7387         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7388
7389         if (ret != I40E_SUCCESS)
7390                 goto DONE;
7391
7392         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
7393         if (vsi->vlan_num == 1) {
7394                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7395                 if (ret != I40E_SUCCESS)
7396                         goto DONE;
7397
7398                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7399                 if (ret != I40E_SUCCESS)
7400                         goto DONE;
7401         }
7402
7403         i40e_set_vlan_filter(vsi, vlan, 0);
7404
7405         vsi->vlan_num--;
7406         ret = I40E_SUCCESS;
7407 DONE:
7408         rte_free(mv_f);
7409         return ret;
7410 }
7411
7412 int
7413 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7414 {
7415         struct i40e_mac_filter *f;
7416         struct i40e_macvlan_filter *mv_f;
7417         int i, vlan_num = 0;
7418         int ret = I40E_SUCCESS;
7419
7420         /* If it has already been added and configured, just return */
7421         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7422         if (f != NULL)
7423                 return I40E_SUCCESS;
7424         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
7425                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
7426
7427                 /**
7428                  * If vlan_num is 0, this is the first time to add a mac;
7429                  * set the filter for vlan_id 0.
7430                  */
7431                 if (vsi->vlan_num == 0) {
7432                         i40e_set_vlan_filter(vsi, 0, 1);
7433                         vsi->vlan_num = 1;
7434                 }
7435                 vlan_num = vsi->vlan_num;
7436         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
7437                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
7438                 vlan_num = 1;
7439
7440         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7441         if (mv_f == NULL) {
7442                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7443                 return I40E_ERR_NO_MEMORY;
7444         }
7445
7446         for (i = 0; i < vlan_num; i++) {
7447                 mv_f[i].filter_type = mac_filter->filter_type;
7448                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7449                                 ETH_ADDR_LEN);
7450         }
7451
7452         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7453                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
7454                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7455                                         &mac_filter->mac_addr);
7456                 if (ret != I40E_SUCCESS)
7457                         goto DONE;
7458         }
7459
7460         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7461         if (ret != I40E_SUCCESS)
7462                 goto DONE;
7463
7464         /* Add the mac addr into mac list */
7465         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7466         if (f == NULL) {
7467                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7468                 ret = I40E_ERR_NO_MEMORY;
7469                 goto DONE;
7470         }
7471         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7472                         ETH_ADDR_LEN);
7473         f->mac_info.filter_type = mac_filter->filter_type;
7474         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7475         vsi->mac_num++;
7476
7477         ret = I40E_SUCCESS;
7478 DONE:
7479         rte_free(mv_f);
7480
7481         return ret;
7482 }
7483
7484 int
7485 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7486 {
7487         struct i40e_mac_filter *f;
7488         struct i40e_macvlan_filter *mv_f;
7489         int i, vlan_num;
7490         enum rte_mac_filter_type filter_type;
7491         int ret = I40E_SUCCESS;
7492
7493         /* If it can't be found, return an error */
7494         f = i40e_find_mac_filter(vsi, addr);
7495         if (f == NULL)
7496                 return I40E_ERR_PARAM;
7497
7498         vlan_num = vsi->vlan_num;
7499         filter_type = f->mac_info.filter_type;
7500         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7501                 filter_type == RTE_MACVLAN_HASH_MATCH) {
7502                 if (vlan_num == 0) {
7503                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7504                         return I40E_ERR_PARAM;
7505                 }
7506         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7507                         filter_type == RTE_MAC_HASH_MATCH)
7508                 vlan_num = 1;
7509
7510         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7511         if (mv_f == NULL) {
7512                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7513                 return I40E_ERR_NO_MEMORY;
7514         }
7515
7516         for (i = 0; i < vlan_num; i++) {
7517                 mv_f[i].filter_type = filter_type;
7518                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7519                                 ETH_ADDR_LEN);
7520         }
7521         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7522                         filter_type == RTE_MACVLAN_HASH_MATCH) {
7523                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7524                 if (ret != I40E_SUCCESS)
7525                         goto DONE;
7526         }
7527
7528         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7529         if (ret != I40E_SUCCESS)
7530                 goto DONE;
7531
7532         /* Remove the mac addr from the mac list */
7533         TAILQ_REMOVE(&vsi->mac_list, f, next);
7534         rte_free(f);
7535         vsi->mac_num--;
7536
7537         ret = I40E_SUCCESS;
7538 DONE:
7539         rte_free(mv_f);
7540         return ret;
7541 }
7542
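/**
 * The flow-type <-> PCTYPE translation below relies on the per-adapter
 * pctypes_tbl lookup table: each rte_eth flow type maps to a bitmask of
 * hardware packet classifier types (PCTYPEs), and one flow type may
 * cover several PCTYPE bits.
 */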
7543 /* Configure hash enable flags for RSS */
7544 uint64_t
7545 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7546 {
7547         uint64_t hena = 0;
7548         int i;
7549
7550         if (!flags)
7551                 return hena;
7552
7553         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7554                 if (flags & (1ULL << i))
7555                         hena |= adapter->pctypes_tbl[i];
7556         }
7557
7558         return hena;
7559 }
7560
7561 /* Parse the hash enable flags */
7562 uint64_t
7563 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7564 {
7565         uint64_t rss_hf = 0;
7566         int i;
7567
7568         if (!flags)
7569                 return rss_hf;
7570
7571         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7572                 if (flags & adapter->pctypes_tbl[i])
7573                         rss_hf |= (1ULL << i);
7574         }
7575         return rss_hf;
7576 }
7577
7578 /* Disable RSS */
7579 static void
7580 i40e_pf_disable_rss(struct i40e_pf *pf)
7581 {
7582         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7583
7584         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7585         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7586         I40E_WRITE_FLUSH(hw);
7587 }
7588
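/**
 * Program the RSS hash key either through the admin queue (when the
 * I40E_FLAG_RSS_AQ_CAPABLE flag is set) or by writing the HKEY registers
 * directly; key_len must be exactly (HKEY_MAX_INDEX + 1) * sizeof(uint32_t)
 * bytes for the target function (PF or VF).
 */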
7589 int
7590 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7591 {
7592         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7593         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7594         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7595                            I40E_VFQF_HKEY_MAX_INDEX :
7596                            I40E_PFQF_HKEY_MAX_INDEX;
7597         int ret = 0;
7598
7599         if (!key || key_len == 0) {
7600                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7601                 return 0;
7602         } else if (key_len != (key_idx + 1) * sizeof(uint32_t)) {
7604                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7605                 return -EINVAL;
7606         }
7607
7608         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7609                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7610                         (struct i40e_aqc_get_set_rss_key_data *)key;
7611
7612                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7613                 if (ret)
7614                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7615         } else {
7616                 uint32_t *hash_key = (uint32_t *)key;
7617                 uint16_t i;
7618
7619                 if (vsi->type == I40E_VSI_SRIOV) {
7620                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7621                                 I40E_WRITE_REG(
7622                                         hw,
7623                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7624                                         hash_key[i]);
7625
7626                 } else {
7627                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7628                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7629                                                hash_key[i]);
7630                 }
7631                 I40E_WRITE_FLUSH(hw);
7632         }
7633
7634         return ret;
7635 }
7636
7637 static int
7638 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7639 {
7640         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7641         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7642         uint32_t reg;
7643         int ret;
7644
7645         if (!key || !key_len)
7646                 return 0;
7647
7648         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7649                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7650                         (struct i40e_aqc_get_set_rss_key_data *)key);
7651                 if (ret) {
7652                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7653                         return ret;
7654                 }
7655         } else {
7656                 uint32_t *key_dw = (uint32_t *)key;
7657                 uint16_t i;
7658
7659                 if (vsi->type == I40E_VSI_SRIOV) {
7660                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7661                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7662                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7663                         }
7664                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7665                                    sizeof(uint32_t);
7666                 } else {
7667                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7668                                 reg = I40E_PFQF_HKEY(i);
7669                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7670                         }
7671                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7672                                    sizeof(uint32_t);
7673                 }
7674         }
7675         return 0;
7676 }
7677
7678 static int
7679 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7680 {
7681         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7682         uint64_t hena;
7683         int ret;
7684
7685         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7686                                rss_conf->rss_key_len);
7687         if (ret)
7688                 return ret;
7689
7690         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7691         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7692         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7693         I40E_WRITE_FLUSH(hw);
7694
7695         return 0;
7696 }
7697
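/**
 * rss_hash_update can only change which flow types are hashed; enabling
 * or disabling RSS as a whole is decided at device configuration time,
 * so a request that would flip the current state is rejected with
 * -EINVAL below.
 */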
7698 static int
7699 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7700                          struct rte_eth_rss_conf *rss_conf)
7701 {
7702         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7703         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7704         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7705         uint64_t hena;
7706
7707         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7708         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7709
7710         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7711                 if (rss_hf != 0) /* Enable RSS */
7712                         return -EINVAL;
7713                 return 0; /* Nothing to do */
7714         }
7715         /* RSS enabled */
7716         if (rss_hf == 0) /* Disable RSS */
7717                 return -EINVAL;
7718
7719         return i40e_hw_rss_hash_set(pf, rss_conf);
7720 }
7721
7722 static int
7723 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7724                            struct rte_eth_rss_conf *rss_conf)
7725 {
7726         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7727         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7728         uint64_t hena;
7729         int ret;
7730
7731         if (!rss_conf)
7732                 return -EINVAL;
7733
7734         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7735                          &rss_conf->rss_key_len);
7736         if (ret)
7737                 return ret;
7738
7739         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7740         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7741         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7742
7743         return 0;
7744 }
7745
7746 static int
7747 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7748 {
7749         switch (filter_type) {
7750         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7751                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7752                 break;
7753         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7754                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7755                 break;
7756         case RTE_TUNNEL_FILTER_IMAC_TENID:
7757                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7758                 break;
7759         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7760                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7761                 break;
7762         case ETH_TUNNEL_FILTER_IMAC:
7763                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7764                 break;
7765         case ETH_TUNNEL_FILTER_OIP:
7766                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7767                 break;
7768         case ETH_TUNNEL_FILTER_IIP:
7769                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7770                 break;
7771         default:
7772                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7773                 return -EINVAL;
7774         }
7775
7776         return 0;
7777 }
7778
7779 /* Convert tunnel filter structure */
7780 static int
7781 i40e_tunnel_filter_convert(
7782         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7783         struct i40e_tunnel_filter *tunnel_filter)
7784 {
7785         rte_ether_addr_copy((struct rte_ether_addr *)
7786                         &cld_filter->element.outer_mac,
7787                 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7788         rte_ether_addr_copy((struct rte_ether_addr *)
7789                         &cld_filter->element.inner_mac,
7790                 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7791         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7792         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7793              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7794             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7795                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7796         else
7797                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7798         tunnel_filter->input.flags = cld_filter->element.flags;
7799         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7800         tunnel_filter->queue = cld_filter->element.queue_number;
7801         rte_memcpy(tunnel_filter->input.general_fields,
7802                    cld_filter->general_fields,
7803                    sizeof(cld_filter->general_fields));
7804
7805         return 0;
7806 }
7807
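/**
 * The software tunnel-filter table pairs an rte_hash keyed on the filter
 * input fields with a hash_map array: rte_hash_add_key()/lookup() return
 * a non-negative slot index on success, which doubles as the index into
 * hash_map for reaching the filter object.
 */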
7808 /* Check if the tunnel filter exists */
7809 struct i40e_tunnel_filter *
7810 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7811                              const struct i40e_tunnel_filter_input *input)
7812 {
7813         int ret;
7814
7815         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7816         if (ret < 0)
7817                 return NULL;
7818
7819         return tunnel_rule->hash_map[ret];
7820 }
7821
7822 /* Add a tunnel filter into the SW list */
7823 static int
7824 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7825                              struct i40e_tunnel_filter *tunnel_filter)
7826 {
7827         struct i40e_tunnel_rule *rule = &pf->tunnel;
7828         int ret;
7829
7830         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7831         if (ret < 0) {
7832                 PMD_DRV_LOG(ERR,
7833                             "Failed to insert tunnel filter to hash table %d!",
7834                             ret);
7835                 return ret;
7836         }
7837         rule->hash_map[ret] = tunnel_filter;
7838
7839         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7840
7841         return 0;
7842 }
7843
7844 /* Delete a tunnel filter from the SW list */
7845 int
7846 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7847                           struct i40e_tunnel_filter_input *input)
7848 {
7849         struct i40e_tunnel_rule *rule = &pf->tunnel;
7850         struct i40e_tunnel_filter *tunnel_filter;
7851         int ret;
7852
7853         ret = rte_hash_del_key(rule->hash_table, input);
7854         if (ret < 0) {
7855                 PMD_DRV_LOG(ERR,
7856                             "Failed to delete tunnel filter to hash table %d!",
7857                             ret);
7858                 return ret;
7859         }
7860         tunnel_filter = rule->hash_map[ret];
7861         rule->hash_map[ret] = NULL;
7862
7863         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7864         rte_free(tunnel_filter);
7865
7866         return 0;
7867 }
7868
7869 int
7870 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7871                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7872                         uint8_t add)
7873 {
7874         uint16_t ip_type;
7875         uint32_t ipv4_addr, ipv4_addr_le;
7876         uint8_t i, tun_type = 0;
7877         /* internal variable for converting ipv6 byte order */
7878         uint32_t convert_ipv6[4];
7879         int val, ret = 0;
7880         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7881         struct i40e_vsi *vsi = pf->main_vsi;
7882         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
7883         struct i40e_aqc_cloud_filters_element_bb *pfilter;
7884         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7885         struct i40e_tunnel_filter *tunnel, *node;
7886         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7887
7888         cld_filter = rte_zmalloc("tunnel_filter",
7889                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7890         0);
7891
7892         if (cld_filter == NULL) {
7893                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7894                 return -ENOMEM;
7895         }
7896         pfilter = cld_filter;
7897
        rte_ether_addr_copy(&tunnel_filter->outer_mac,
                        (struct rte_ether_addr *)&pfilter->element.outer_mac);
        rte_ether_addr_copy(&tunnel_filter->inner_mac,
                        (struct rte_ether_addr *)&pfilter->element.inner_mac);

        pfilter->element.inner_vlan =
                rte_cpu_to_le_16(tunnel_filter->inner_vlan);
        if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
                ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
                ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
                ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
                rte_memcpy(&pfilter->element.ipaddr.v4.data,
                                &ipv4_addr_le,
                                sizeof(pfilter->element.ipaddr.v4.data));
        } else {
                ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
                for (i = 0; i < 4; i++) {
                        convert_ipv6[i] =
                        rte_cpu_to_le_32(rte_be_to_cpu_32(
                                         tunnel_filter->ip_addr.ipv6_addr[i]));
                }
                rte_memcpy(&pfilter->element.ipaddr.v6.data,
                           &convert_ipv6,
                           sizeof(pfilter->element.ipaddr.v6.data));
        }

        /* check tunnel type */
        switch (tunnel_filter->tunnel_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
                break;
        case RTE_TUNNEL_TYPE_NVGRE:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
                break;
        case RTE_TUNNEL_TYPE_IP_IN_GRE:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
                break;
        case RTE_TUNNEL_TYPE_VXLAN_GPE:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
                break;
        default:
                /* Other tunnel types are not supported. */
                PMD_DRV_LOG(ERR, "tunnel type is not supported.");
                rte_free(cld_filter);
                return -EINVAL;
        }

        val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
                                       &pfilter->element.flags);
        if (val < 0) {
                rte_free(cld_filter);
                return -EINVAL;
        }

        pfilter->element.flags |= rte_cpu_to_le_16(
                I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
                ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
        pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
        pfilter->element.queue_number =
                rte_cpu_to_le_16(tunnel_filter->queue_id);

        /* Check if the filter already exists in the SW list */
        memset(&check_filter, 0, sizeof(check_filter));
        i40e_tunnel_filter_convert(cld_filter, &check_filter);
        node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
        if (add && node) {
                PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                rte_free(cld_filter);
                return -EINVAL;
        }

        if (!add && !node) {
                PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                rte_free(cld_filter);
                return -EINVAL;
        }

        if (add) {
                ret = i40e_aq_add_cloud_filters(hw,
                                        vsi->seid, &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
                tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
                if (tunnel == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                        rte_free(cld_filter);
                        return -ENOMEM;
                }

                rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
                ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
                if (ret < 0)
                        rte_free(tunnel);
        } else {
                ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
                                                   &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
                ret = i40e_sw_tunnel_filter_del(pf, &node->input);
        }

        rte_free(cld_filter);
        return ret;
}

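/*
 * A minimal usage sketch for i40e_dev_tunnel_filter_set(), assuming a
 * started port whose private data is an i40e adapter; the VXLAN VNI and
 * queue below are illustrative values only (1 adds the rule, 0 deletes it):
 *
 *      struct rte_eth_tunnel_filter_conf conf = {
 *              .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *              .filter_type = ETH_TUNNEL_FILTER_TENID,
 *              .tenant_id = 100,
 *              .queue_id = 1,
 *      };
 *
 *      ret = i40e_dev_tunnel_filter_set(pf, &conf, 1);
 */
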
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
#define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
#define I40E_TR_GENEVE_KEY_MASK                 0x8
#define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
#define I40E_TR_GRE_KEY_MASK                    0x400
#define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
#define I40E_TR_GRE_NO_KEY_MASK                 0x8000
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
#define I40E_DIRECTION_INGRESS_KEY              0x8000
#define I40E_TR_L4_TYPE_TCP                     0x2
#define I40E_TR_L4_TYPE_UDP                     0x4
#define I40E_TR_L4_TYPE_SCTP                    0x8

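/*
 * The i40e_replace_*_filter() helpers below reprogram firmware filter
 * templates through the "replace cloud filters" admin queue command:
 * old_filter_type names the stock template to retire, new_filter_type the
 * custom one to install, and filter_replace_buf describes the field-vector
 * words the new template matches on (TEID words, tunnel-key masks, L4 type
 * bits). Each replacement is issued at most once per PF, guarded by the
 * *_replace_flag fields, and is refused when support_multi_driver is set
 * because the change is global to the NIC rather than per port.
 */
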
static enum i40e_status_code
i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
{
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
        enum i40e_status_code status = I40E_SUCCESS;

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
                return I40E_NOT_SUPPORTED;
        }

        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L1 filter */
        filter_replace.old_filter_type =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
        filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
        filter_replace.tr_bit = 0;

        /* Prepare the buffer, 3 entries */
        filter_replace_buf.data[0] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[2] = 0xFF;
        filter_replace_buf.data[3] = 0xFF;
        filter_replace_buf.data[4] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[7] = 0xF0;
        filter_replace_buf.data[8] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
        filter_replace_buf.data[8] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
                I40E_TR_GENEVE_KEY_MASK |
                I40E_TR_GENERIC_UDP_TUNNEL_MASK;
        filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
                I40E_TR_GRE_KEY_WITH_XSUM_MASK |
                I40E_TR_GRE_NO_KEY_MASK) >> 8;

        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (!status && (filter_replace.old_filter_type !=
                        filter_replace.new_filter_type))
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        return status;
}

static enum i40e_status_code
i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
{
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
        enum i40e_status_code status = I40E_SUCCESS;

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
                return I40E_NOT_SUPPORTED;
        }

        /* For MPLSoUDP */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
                I40E_AQC_MIRROR_CLOUD_FILTER;
        filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
        filter_replace.new_filter_type =
                I40E_AQC_ADD_CLOUD_FILTER_0X11;
        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (status < 0)
                return status;
        if (filter_replace.old_filter_type !=
            filter_replace.new_filter_type)
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        /* For MPLSoGRE */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
                I40E_AQC_MIRROR_CLOUD_FILTER;
        filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
        filter_replace.new_filter_type =
                I40E_AQC_ADD_CLOUD_FILTER_0X12;
        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;

        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (!status && (filter_replace.old_filter_type !=
                        filter_replace.new_filter_type))
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        return status;
}

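/*
 * After the two MPLS replacements above, custom cloud filter 0x11 matches
 * MPLSoUDP and 0x12 matches MPLSoGRE; i40e_dev_consistent_tunnel_filter_set()
 * selects the same 0x11/0x12 flags when it later programs an MPLS rule.
 */
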
static enum i40e_status_code
i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
{
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
        enum i40e_status_code status = I40E_SUCCESS;

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
                return I40E_NOT_SUPPORTED;
        }

        /* For GTP-C */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
        /* create L1 filter */
        filter_replace.old_filter_type =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
        filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
        filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[2] = 0xFF;
        filter_replace_buf.data[3] = 0xFF;
        filter_replace_buf.data[4] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[6] = 0xFF;
        filter_replace_buf.data[7] = 0xFF;
        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (status < 0)
                return status;
        if (filter_replace.old_filter_type !=
            filter_replace.new_filter_type)
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        /* for GTP-U */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
        /* create L1 filter */
        filter_replace.old_filter_type =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
        filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
        filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[2] = 0xFF;
        filter_replace_buf.data[3] = 0xFF;
        filter_replace_buf.data[4] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[6] = 0xFF;
        filter_replace_buf.data[7] = 0xFF;

        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (!status && (filter_replace.old_filter_type !=
                        filter_replace.new_filter_type))
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        return status;
}

static enum i40e_status_code
i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
{
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
        enum i40e_status_code status = I40E_SUCCESS;

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
                return I40E_NOT_SUPPORTED;
        }

        /* for GTP-C */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
        filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
        filter_replace.new_filter_type =
                I40E_AQC_ADD_CLOUD_FILTER_0X11;
        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (status < 0)
                return status;
        if (filter_replace.old_filter_type !=
            filter_replace.new_filter_type)
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        /* for GTP-U */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
        filter_replace.old_filter_type =
                I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
        filter_replace.new_filter_type =
                I40E_AQC_ADD_CLOUD_FILTER_0X12;
        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;

        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (!status && (filter_replace.old_filter_type !=
                        filter_replace.new_filter_type))
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        return status;
}

static enum i40e_status_code
i40e_replace_port_l1_filter(struct i40e_pf *pf,
                            enum i40e_l4_port_type l4_port_type)
{
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
                return I40E_NOT_SUPPORTED;
        }

        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L1 filter */
        if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
                filter_replace.old_filter_type =
                        I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
                filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
                filter_replace_buf.data[8] =
                        I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
        } else {
                filter_replace.old_filter_type =
                        I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
                filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
                filter_replace_buf.data[8] =
                        I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
        }

        filter_replace.tr_bit = 0;
        /* Prepare the buffer, 3 entries */
        filter_replace_buf.data[0] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[2] = 0x00;
        filter_replace_buf.data[3] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
        filter_replace_buf.data[4] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[5] = 0x00;
        filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
                I40E_TR_L4_TYPE_TCP |
                I40E_TR_L4_TYPE_SCTP;
        filter_replace_buf.data[7] = 0x00;
        filter_replace_buf.data[8] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[9] = 0x00;
        filter_replace_buf.data[10] = 0xFF;
        filter_replace_buf.data[11] = 0xFF;

        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);
        if (!status && filter_replace.old_filter_type !=
            filter_replace.new_filter_type)
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        return status;
}

static enum i40e_status_code
i40e_replace_port_cloud_filter(struct i40e_pf *pf,
                               enum i40e_l4_port_type l4_port_type)
{
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        enum i40e_status_code status = I40E_SUCCESS;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
                return I40E_NOT_SUPPORTED;
        }

        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
                filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
                filter_replace.new_filter_type =
                        I40E_AQC_ADD_CLOUD_FILTER_0X11;
                filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
        } else {
                filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
                filter_replace.new_filter_type =
                        I40E_AQC_ADD_CLOUD_FILTER_0X10;
                filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
        }

        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
        filter_replace.tr_bit = 0;
        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                                               &filter_replace_buf);

        if (!status && filter_replace.old_filter_type !=
            filter_replace.new_filter_type)
                PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
                            " original: 0x%x, new: 0x%x",
                            dev->device->name,
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);

        return status;
}

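/*
 * After the two port replacements above, the custom pair 0x11 (L1 plus
 * cloud filter) matches on L4 source port and the pair 0x10 on L4
 * destination port; the UDP/TCP/SCTP cases in
 * i40e_dev_consistent_tunnel_filter_set() pick the matching flag by
 * l4_port_type.
 */
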
int
i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
                      struct i40e_tunnel_filter_conf *tunnel_filter,
                      uint8_t add)
{
        uint16_t ip_type;
        uint32_t ipv4_addr, ipv4_addr_le;
        uint8_t i, tun_type = 0;
        /* internal variable to convert ipv6 byte order */
        uint32_t convert_ipv6[4];
        int val, ret = 0;
        struct i40e_pf_vf *vf = NULL;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_vsi *vsi;
        struct i40e_aqc_cloud_filters_element_bb *cld_filter;
        struct i40e_aqc_cloud_filters_element_bb *pfilter;
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        struct i40e_tunnel_filter *tunnel, *node;
        struct i40e_tunnel_filter check_filter; /* Check if filter exists */
        uint32_t teid_le;
        bool big_buffer = 0;

        cld_filter = rte_zmalloc("tunnel_filter",
                         sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
                         0);

        if (cld_filter == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                return -ENOMEM;
        }
        pfilter = cld_filter;

        rte_ether_addr_copy(&tunnel_filter->outer_mac,
                        (struct rte_ether_addr *)&pfilter->element.outer_mac);
        rte_ether_addr_copy(&tunnel_filter->inner_mac,
                        (struct rte_ether_addr *)&pfilter->element.inner_mac);

        pfilter->element.inner_vlan =
                rte_cpu_to_le_16(tunnel_filter->inner_vlan);
        if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
                ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
                ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
                ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
                rte_memcpy(&pfilter->element.ipaddr.v4.data,
                                &ipv4_addr_le,
                                sizeof(pfilter->element.ipaddr.v4.data));
        } else {
                ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
                for (i = 0; i < 4; i++) {
                        convert_ipv6[i] =
                        rte_cpu_to_le_32(rte_be_to_cpu_32(
                                         tunnel_filter->ip_addr.ipv6_addr[i]));
                }
                rte_memcpy(&pfilter->element.ipaddr.v6.data,
                           &convert_ipv6,
                           sizeof(pfilter->element.ipaddr.v6.data));
        }

        /* check tunnel type */
        switch (tunnel_filter->tunnel_type) {
        case I40E_TUNNEL_TYPE_VXLAN:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
                break;
        case I40E_TUNNEL_TYPE_NVGRE:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
                break;
        case I40E_TUNNEL_TYPE_IP_IN_GRE:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
                break;
        case I40E_TUNNEL_TYPE_MPLSoUDP:
                if (!pf->mpls_replace_flag) {
                        i40e_replace_mpls_l1_filter(pf);
                        i40e_replace_mpls_cloud_filter(pf);
                        pf->mpls_replace_flag = 1;
                }
                teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
                        teid_le >> 4;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
                        (teid_le & 0xF) << 12;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
                        0x40;
                big_buffer = 1;
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
                break;
        case I40E_TUNNEL_TYPE_MPLSoGRE:
                if (!pf->mpls_replace_flag) {
                        i40e_replace_mpls_l1_filter(pf);
                        i40e_replace_mpls_cloud_filter(pf);
                        pf->mpls_replace_flag = 1;
                }
                teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
                        teid_le >> 4;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
                        (teid_le & 0xF) << 12;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
                        0x0;
                big_buffer = 1;
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
                break;
        case I40E_TUNNEL_TYPE_GTPC:
                if (!pf->gtp_replace_flag) {
                        i40e_replace_gtp_l1_filter(pf);
                        i40e_replace_gtp_cloud_filter(pf);
                        pf->gtp_replace_flag = 1;
                }
                teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
                        (teid_le >> 16) & 0xFFFF;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
                        teid_le & 0xFFFF;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
                        0x0;
                big_buffer = 1;
                break;
        case I40E_TUNNEL_TYPE_GTPU:
                if (!pf->gtp_replace_flag) {
                        i40e_replace_gtp_l1_filter(pf);
                        i40e_replace_gtp_cloud_filter(pf);
                        pf->gtp_replace_flag = 1;
                }
                teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
                        (teid_le >> 16) & 0xFFFF;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
                        teid_le & 0xFFFF;
                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
                        0x0;
                big_buffer = 1;
                break;
        case I40E_TUNNEL_TYPE_QINQ:
                if (!pf->qinq_replace_flag) {
                        ret = i40e_cloud_filter_qinq_create(pf);
                        if (ret < 0)
                                PMD_DRV_LOG(DEBUG,
                                            "QinQ tunnel filter already created.");
                        pf->qinq_replace_flag = 1;
                }
                /*
                 * Carry the outer and inner VLAN values in the general
                 * fields. Big Buffer must be used, hence the
                 * i40e_aq_add_cloud_filters_bb() path below.
                 */
                pfilter->general_fields[0] = tunnel_filter->inner_vlan;
                pfilter->general_fields[1] = tunnel_filter->outer_vlan;
                big_buffer = 1;
                break;
        case I40E_CLOUD_TYPE_UDP:
        case I40E_CLOUD_TYPE_TCP:
        case I40E_CLOUD_TYPE_SCTP:
                if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
                        if (!pf->sport_replace_flag) {
                                i40e_replace_port_l1_filter(pf,
                                                tunnel_filter->l4_port_type);
                                i40e_replace_port_cloud_filter(pf,
                                                tunnel_filter->l4_port_type);
                                pf->sport_replace_flag = 1;
                        }
                        teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
                        pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
                                I40E_DIRECTION_INGRESS_KEY;

                        if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
                                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
                                        I40E_TR_L4_TYPE_UDP;
                        else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
                                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
                                        I40E_TR_L4_TYPE_TCP;
                        else
                                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
                                        I40E_TR_L4_TYPE_SCTP;

                        pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
                                (teid_le >> 16) & 0xFFFF;
                        big_buffer = 1;
                } else {
                        if (!pf->dport_replace_flag) {
                                i40e_replace_port_l1_filter(pf,
                                                tunnel_filter->l4_port_type);
                                i40e_replace_port_cloud_filter(pf,
                                                tunnel_filter->l4_port_type);
                                pf->dport_replace_flag = 1;
                        }
                        teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
                        pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
                                I40E_DIRECTION_INGRESS_KEY;

                        if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
                                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
                                        I40E_TR_L4_TYPE_UDP;
                        else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
                                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
                                        I40E_TR_L4_TYPE_TCP;
                        else
                                pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
                                        I40E_TR_L4_TYPE_SCTP;

                        pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
                                (teid_le >> 16) & 0xFFFF;
                        big_buffer = 1;
                }

                break;
        default:
                /* Other tunnel types are not supported. */
                PMD_DRV_LOG(ERR, "tunnel type is not supported.");
                rte_free(cld_filter);
                return -EINVAL;
        }

        if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
                pfilter->element.flags =
                        I40E_AQC_ADD_CLOUD_FILTER_0X11;
        else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
                pfilter->element.flags =
                        I40E_AQC_ADD_CLOUD_FILTER_0X12;
        else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
                pfilter->element.flags =
                        I40E_AQC_ADD_CLOUD_FILTER_0X11;
        else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
                pfilter->element.flags =
                        I40E_AQC_ADD_CLOUD_FILTER_0X12;
        else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
                pfilter->element.flags |=
                        I40E_AQC_ADD_CLOUD_FILTER_0X10;
        else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
                 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
                 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
                if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
                        pfilter->element.flags |=
                                I40E_AQC_ADD_CLOUD_FILTER_0X11;
                else
                        pfilter->element.flags |=
                                I40E_AQC_ADD_CLOUD_FILTER_0X10;
        } else {
                val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
                                                &pfilter->element.flags);
                if (val < 0) {
                        rte_free(cld_filter);
                        return -EINVAL;
                }
        }

        pfilter->element.flags |= rte_cpu_to_le_16(
                I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
                ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
        pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
        pfilter->element.queue_number =
                rte_cpu_to_le_16(tunnel_filter->queue_id);

        if (!tunnel_filter->is_to_vf)
                vsi = pf->main_vsi;
        else {
                if (tunnel_filter->vf_id >= pf->vf_num) {
                        PMD_DRV_LOG(ERR, "Invalid argument.");
                        rte_free(cld_filter);
                        return -EINVAL;
                }
                vf = &pf->vfs[tunnel_filter->vf_id];
                vsi = vf->vsi;
        }

        /* Check if the filter already exists in the SW list */
        memset(&check_filter, 0, sizeof(check_filter));
        i40e_tunnel_filter_convert(cld_filter, &check_filter);
        check_filter.is_to_vf = tunnel_filter->is_to_vf;
        check_filter.vf_id = tunnel_filter->vf_id;
        node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
        if (add && node) {
                PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                rte_free(cld_filter);
                return -EINVAL;
        }

        if (!add && !node) {
                PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                rte_free(cld_filter);
                return -EINVAL;
        }

        if (add) {
                if (big_buffer)
                        ret = i40e_aq_add_cloud_filters_bb(hw,
                                                   vsi->seid, cld_filter, 1);
                else
                        ret = i40e_aq_add_cloud_filters(hw,
                                        vsi->seid, &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
                tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
                if (tunnel == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory.");
                        rte_free(cld_filter);
                        return -ENOMEM;
                }

                rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
                ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
                if (ret < 0)
                        rte_free(tunnel);
        } else {
                if (big_buffer)
                        ret = i40e_aq_rem_cloud_filters_bb(
                                hw, vsi->seid, cld_filter, 1);
                else
                        ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
                                                &cld_filter->element, 1);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                        rte_free(cld_filter);
                        return -ENOTSUP;
                }
                ret = i40e_sw_tunnel_filter_del(pf, &node->input);
        }

        rte_free(cld_filter);
        return ret;
}

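/*
 * A minimal usage sketch for i40e_dev_consistent_tunnel_filter_set(),
 * assuming an initialized PF; the GTP-U TEID and queue are illustrative
 * values only:
 *
 *      struct i40e_tunnel_filter_conf conf = {
 *              .tunnel_type = I40E_TUNNEL_TYPE_GTPU,
 *              .tenant_id = 0x1234,
 *              .queue_id = 2,
 *              .is_to_vf = 0,
 *      };
 *
 *      ret = i40e_dev_consistent_tunnel_filter_set(pf, &conf, 1);
 *
 * GTP, MPLS, QinQ and L4-port rules take the big-buffer admin queue path,
 * since they carry match data in general_fields[].
 */
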
static int
i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
{
        uint8_t i;

        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
                if (pf->vxlan_ports[i] == port)
                        return i;
        }

        return -1;
}

static int
i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
{
        int idx, ret;
        uint8_t filter_idx = 0;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);

        idx = i40e_get_vxlan_port_idx(pf, port);

        /* Check if port already exists */
        if (idx >= 0) {
                PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
                return -EINVAL;
        }

        /* Now check if there is space to add the new port */
        idx = i40e_get_vxlan_port_idx(pf, 0);
        if (idx < 0) {
                PMD_DRV_LOG(ERR,
                        "Maximum number of UDP ports reached, not adding port %d",
                        port);
                return -ENOSPC;
        }

        ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
                                     &filter_idx, NULL);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
                return -1;
        }

        PMD_DRV_LOG(INFO, "Added UDP port %d with AQ command, filter index %d",
                    port, filter_idx);

        /* New port: add it and mark its index in the bitmap */
        pf->vxlan_ports[idx] = port;
        pf->vxlan_bitmap |= (1 << idx);

        if (!(pf->flags & I40E_FLAG_VXLAN))
                pf->flags |= I40E_FLAG_VXLAN;

        return 0;
}

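/*
 * Slot bookkeeping note: vxlan_ports[] uses port 0 as the "free slot"
 * marker, so i40e_get_vxlan_port_idx(pf, 0) doubles as a free-slot search,
 * and vxlan_bitmap mirrors which entries are in use. For example, after
 * adding ports 4789 and 4790 into an empty table, vxlan_bitmap == 0x3 and
 * the next free slot is index 2.
 */
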
static int
i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
{
        int idx;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);

        if (!(pf->flags & I40E_FLAG_VXLAN)) {
                PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
                return -EINVAL;
        }

        idx = i40e_get_vxlan_port_idx(pf, port);

        if (idx < 0) {
                PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
                return -EINVAL;
        }

        if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
                PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
                return -1;
        }

        PMD_DRV_LOG(INFO, "Deleted UDP port %d with AQ command, filter index %d",
                    port, idx);

        pf->vxlan_ports[idx] = 0;
        pf->vxlan_bitmap &= ~(1 << idx);

        if (!pf->vxlan_bitmap)
                pf->flags &= ~I40E_FLAG_VXLAN;

        return 0;
}

/* Add UDP tunneling port */
static int
i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                             struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
                ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
                                          I40E_AQC_TUNNEL_TYPE_VXLAN);
                break;
        case RTE_TUNNEL_TYPE_VXLAN_GPE:
                ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
                                          I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
        case RTE_TUNNEL_TYPE_TEREDO:
                PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
                ret = -1;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -1;
                break;
        }

        return ret;
}

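/*
 * i40e_dev_udp_tunnel_port_add/_del back the generic ethdev API; a minimal
 * sketch of offloading a non-standard VXLAN port from an application,
 * assuming port_id refers to an i40e device:
 *
 *      struct rte_eth_udp_tunnel tunnel = {
 *              .udp_port = 4790,
 *              .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *      };
 *
 *      ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */
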
/* Remove UDP tunneling port */
static int
i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                             struct rte_eth_udp_tunnel *udp_tunnel)
{
        int ret = 0;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;

        switch (udp_tunnel->prot_type) {
        case RTE_TUNNEL_TYPE_VXLAN:
        case RTE_TUNNEL_TYPE_VXLAN_GPE:
                ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
                break;
        case RTE_TUNNEL_TYPE_GENEVE:
        case RTE_TUNNEL_TYPE_TEREDO:
                PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
                ret = -1;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -1;
                break;
        }

        return ret;
}

/* Calculate the maximum number of contiguous PF queues that are configured */
static int
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
{
        struct rte_eth_dev_data *data = pf->dev_data;
        int i, num;
        struct i40e_rx_queue *rxq;

        num = 0;
        for (i = 0; i < pf->lan_nb_qps; i++) {
                rxq = data->rx_queues[i];
                if (rxq && rxq->q_set)
                        num++;
                else
                        break;
        }

        return num;
}

/* Configure RSS */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
{
        enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct rte_eth_rss_conf rss_conf;
        uint32_t i, lut = 0;
        uint16_t j, num;

        /*
         * If both VMDQ and RSS are enabled, not all of the PF queues are
         * configured. It's necessary to calculate the actual number of PF
         * queues that are configured.
         */
        if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                num = i40e_pf_calc_configured_queues_num(pf);
        else
                num = pf->dev_data->nb_rx_queues;

        num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
        PMD_INIT_LOG(INFO, "At most %u contiguous PF queues are configured",
                        num);

        if (num == 0) {
                PMD_INIT_LOG(ERR,
                        "No PF queues are configured to enable RSS for port %u",
                        pf->dev_data->port_id);
                return -ENOTSUP;
        }

        if (pf->adapter->rss_reta_updated == 0) {
                for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
                        if (j == num)
                                j = 0;
                        lut = (lut << 8) | (j & ((0x1 <<
                                hw->func_caps.rss_table_entry_width) - 1));
                        if ((i & 3) == 3)
                                I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
                                               rte_bswap32(lut));
                }
        }

        rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
        if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
            !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
                i40e_pf_disable_rss(pf);
                return 0;
        }
        if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
                (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
                /* Random default keys */
                static uint32_t rss_key_default[] = {0x6b793944,
                        0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
                        0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
                        0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

                rss_conf.rss_key = (uint8_t *)rss_key_default;
                rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                        sizeof(uint32_t);
        }

        return i40e_hw_rss_hash_set(pf, &rss_conf);
}

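/*
 * LUT packing note for the loop above: each RSS lookup-table entry is one
 * byte, and four entries are batched into a single 32-bit I40E_PFQF_HLUT
 * register write every fourth iteration ((i & 3) == 3), with rte_bswap32()
 * fixing the byte order. With num == 4 queues, the first register written
 * holds the queue sequence 0,1,2,3, and the pattern repeats across the
 * whole table so traffic is spread round-robin over the configured queues.
 */
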
static int
i40e_tunnel_filter_param_check(struct i40e_pf *pf,
                               struct rte_eth_tunnel_filter_conf *filter)
{
        if (pf == NULL || filter == NULL) {
                PMD_DRV_LOG(ERR, "Invalid parameter");
                return -EINVAL;
        }

        if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, "Invalid queue ID");
                return -EINVAL;
        }

        if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
                PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
                return -EINVAL;
        }

        if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
            (rte_is_zero_ether_addr(&filter->outer_mac))) {
                PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
                return -EINVAL;
        }

        if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
            (rte_is_zero_ether_addr(&filter->inner_mac))) {
                PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
                return -EINVAL;
        }

        return 0;
}

#define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
#define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
int
i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
{
        struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
        uint32_t val, reg;
        int ret = -EINVAL;

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
                return -ENOTSUP;
        }

        val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
        PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);

        if (len == 3) {
                reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
        } else if (len == 4) {
                reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
        } else {
                PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
                return ret;
        }

        if (reg != val) {
                ret = i40e_aq_debug_write_global_register(hw,
                                                   I40E_GL_PRS_FVBM(2),
                                                   reg, NULL);
                if (ret != 0)
                        return ret;
                PMD_DRV_LOG(DEBUG, "Global register 0x%08x changed "
                            "to value 0x%08x",
                            I40E_GL_PRS_FVBM(2), reg);
        } else {
                ret = 0;
        }
        PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
                    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));

        return ret;
}

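/*
 * A minimal usage sketch for i40e_dev_set_gre_key_len(); only 3- and 4-byte
 * key lengths are accepted, and the write lands in the NIC-wide GL_PRS_FVBM
 * register, so it affects all ports on the device:
 *
 *      ret = i40e_dev_set_gre_key_len(hw, 4);
 */
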
static int
i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
{
        int ret = -EINVAL;

        if (!hw || !cfg)
                return -EINVAL;

        switch (cfg->cfg_type) {
        case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
                ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
                break;
        }

        return ret;
}

9108 static int
9109 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
9110                                enum rte_filter_op filter_op,
9111                                void *arg)
9112 {
9113         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9114         int ret = I40E_ERR_PARAM;
9115
9116         switch (filter_op) {
9117         case RTE_ETH_FILTER_SET:
9118                 ret = i40e_dev_global_config_set(hw,
9119                         (struct rte_eth_global_cfg *)arg);
9120                 break;
9121         default:
9122                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
9123                 break;
9124         }
9125
9126         return ret;
9127 }
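/*
 * Usage sketch (illustrative only): an application reaches the handler
 * above through the generic filter-ctrl API, assuming the
 * RTE_ETH_FILTER_NONE filter type is routed to it by this driver's
 * filter-ctrl entry point. The helper name is hypothetical.
 */
static __rte_unused int
i40e_example_set_gre_key_len(uint16_t port_id, uint8_t len)
{
        struct rte_eth_global_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN;
        cfg.cfg.gre_key_len = len;      /* only 3 or 4 is accepted */
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
                                       RTE_ETH_FILTER_SET, &cfg);
}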
9128
9129 static int
9130 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
9131                           enum rte_filter_op filter_op,
9132                           void *arg)
9133 {
9134         struct rte_eth_tunnel_filter_conf *filter;
9135         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9136         int ret = I40E_SUCCESS;
9137
9138         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
9139
9140         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
9141                 return I40E_ERR_PARAM;
9142
9143         switch (filter_op) {
9144         case RTE_ETH_FILTER_NOP:
9145                 if (!(pf->flags & I40E_FLAG_VXLAN))
9146                         ret = I40E_NOT_SUPPORTED;
9147                 break;
9148         case RTE_ETH_FILTER_ADD:
9149                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
9150                 break;
9151         case RTE_ETH_FILTER_DELETE:
9152                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
9153                 break;
9154         default:
9155                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
9156                 ret = I40E_ERR_PARAM;
9157                 break;
9158         }
9159
9160         return ret;
9161 }
9162
9163 /* Get the symmetric hash enable configuration per port */
9164 static void
9165 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
9166 {
9167         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9168
9169         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
9170 }
9171
9172 /* Set the symmetric hash enable configuration per port */
9173 static void
9174 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9175 {
9176         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9177
9178         if (enable > 0) {
9179                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
9180                         PMD_DRV_LOG(INFO,
9181                                 "Symmetric hash has already been enabled");
9182                         return;
9183                 }
9184                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9185         } else {
9186                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
9187                         PMD_DRV_LOG(INFO,
9188                                 "Symmetric hash has already been disabled");
9189                         return;
9190                 }
9191                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9192         }
9193         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9194         I40E_WRITE_FLUSH(hw);
9195 }
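/*
 * Usage sketch (illustrative only, helper name hypothetical): per-port
 * symmetric hashing is toggled through the hash filter-ctrl path
 * handled further below.
 */
static __rte_unused int
i40e_example_enable_sym_hash_per_port(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
        info.info.enable = 1;
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}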
9196
9197 /*
9198  * Get global configurations of hash function type and symmetric hash enable
9199  * per flow type (pctype). Note that global configuration means it affects all
9200  * the ports on the same NIC.
9201  */
9202 static int
9203 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
9204                                    struct rte_eth_hash_global_conf *g_cfg)
9205 {
9206         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9207         uint32_t reg;
9208         uint16_t i, j;
9209
9210         memset(g_cfg, 0, sizeof(*g_cfg));
9211         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9212         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
9213                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
9214         else
9215                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
9216         PMD_DRV_LOG(DEBUG, "Hash function is %s",
9217                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
9218
9219         /*
9220          * As i40e supports fewer than 64 flow types, only the first 64 bits
9221          * need to be checked.
9222          */
9223         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9224                 g_cfg->valid_bit_mask[i] = 0ULL;
9225                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
9226         }
9227
9228         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
9229
9230         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
9231                 if (!adapter->pctypes_tbl[i])
9232                         continue;
9233                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9234                      j < I40E_FILTER_PCTYPE_MAX; j++) {
9235                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
9236                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
9237                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
9238                                         g_cfg->sym_hash_enable_mask[0] |=
9239                                                                 (1ULL << i);
9240                                 }
9241                         }
9242                 }
9243         }
9244
9245         return 0;
9246 }
9247
9248 static int
9249 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
9250                               const struct rte_eth_hash_global_conf *g_cfg)
9251 {
9252         uint32_t i;
9253         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
9254
9255         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
9256                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
9257                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
9258                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
9259                                                 g_cfg->hash_func);
9260                 return -EINVAL;
9261         }
9262
9263         /*
9264          * As i40e supports fewer than 64 flow types, only the first 64 bits
9265          * need to be checked.
9266          */
9267         mask0 = g_cfg->valid_bit_mask[0];
9268         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9269                 if (i == 0) {
9270                         /* Nonzero iff an unsupported flow type is configured */
9271                         if ((mask0 | i40e_mask) ^ i40e_mask)
9272                                 goto mask_err;
9273                 } else {
9274                         if (g_cfg->valid_bit_mask[i])
9275                                 goto mask_err;
9276                 }
9277         }
9278
9279         return 0;
9280
9281 mask_err:
9282         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
9283
9284         return -EINVAL;
9285 }
9286
9287 /*
9288  * Set global configurations of hash function type and symmetric hash enable
9289  * per flow type (pctype). Note that modifying this global configuration will
9290  * affect all the ports on the same NIC.
9291  */
9292 static int
9293 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
9294                                    struct rte_eth_hash_global_conf *g_cfg)
9295 {
9296         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9297         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9298         int ret;
9299         uint16_t i, j;
9300         uint32_t reg;
9301         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
9302
9303         if (pf->support_multi_driver) {
9304                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
9305                 return -ENOTSUP;
9306         }
9307
9308         /* Check the input parameters */
9309         ret = i40e_hash_global_config_check(adapter, g_cfg);
9310         if (ret < 0)
9311                 return ret;
9312
9313         /*
9314          * As i40e supports fewer than 64 flow types, only the first 64 bits
9315          * need to be configured.
9316          */
9317         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
9318                 if (mask0 & (1UL << i)) {
9319                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
9320                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
9321
9322                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9323                              j < I40E_FILTER_PCTYPE_MAX; j++) {
9324                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
9325                                         i40e_write_global_rx_ctl(hw,
9326                                                           I40E_GLQF_HSYM(j),
9327                                                           reg);
9328                         }
9329                 }
9330         }
9331
9332         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9333         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
9334                 /* Toeplitz */
9335                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
9336                         PMD_DRV_LOG(DEBUG,
9337                                 "Hash function already set to Toeplitz");
9338                         goto out;
9339                 }
9340                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
9341         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
9342                 /* Simple XOR */
9343                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
9344                         PMD_DRV_LOG(DEBUG,
9345                                 "Hash function already set to Simple XOR");
9346                         goto out;
9347                 }
9348                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
9349         } else
9350                 /* Use the default, and keep it as it is */
9351                 goto out;
9352
9353         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
9354
9355 out:
9356         I40E_WRITE_FLUSH(hw);
9357
9358         return 0;
9359 }
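/*
 * Usage sketch (illustrative only, helper name hypothetical): enable
 * symmetric Toeplitz hashing for non-fragmented IPv4/UDP traffic.
 * Because this is a global setting, it affects every port on the NIC.
 */
static __rte_unused int
i40e_example_sym_hash_global(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
        info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
        info.info.global_conf.valid_bit_mask[0] =
                1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.global_conf.sym_hash_enable_mask[0] =
                1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}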
9360
9361 /**
9362  * Valid input sets for hash and flow director filters per PCTYPE
9363  */
9364 static uint64_t
9365 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9366                 enum rte_filter_type filter)
9367 {
9368         uint64_t valid;
9369
9370         static const uint64_t valid_hash_inset_table[] = {
9371                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9372                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9373                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9374                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9375                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9376                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9377                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9378                         I40E_INSET_FLEX_PAYLOAD,
9379                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9380                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9381                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9382                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9383                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9384                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9385                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9386                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9387                         I40E_INSET_FLEX_PAYLOAD,
9388                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9389                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9390                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9391                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9392                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9393                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9394                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9395                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9396                         I40E_INSET_FLEX_PAYLOAD,
9397                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9398                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9399                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9400                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9401                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9402                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9403                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9404                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9405                         I40E_INSET_FLEX_PAYLOAD,
9406                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9407                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9408                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9409                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9410                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9411                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9412                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9413                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9414                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9415                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9416                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9417                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9418                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9419                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9420                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9421                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9422                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9423                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9424                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9425                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9426                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9427                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9428                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9429                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9430                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9431                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9432                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9433                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9434                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9435                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9436                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9437                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9438                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9439                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9440                         I40E_INSET_FLEX_PAYLOAD,
9441                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9442                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9443                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9444                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9445                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9446                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9447                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9448                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9449                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9450                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9451                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9452                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9453                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9454                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9455                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9456                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9457                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9458                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9459                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9460                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9461                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9462                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9463                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9464                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9465                         I40E_INSET_FLEX_PAYLOAD,
9466                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9467                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9468                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9469                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9470                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9471                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9472                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9473                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9474                         I40E_INSET_FLEX_PAYLOAD,
9475                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9476                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9477                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9478                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9479                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9480                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9481                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9482                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9483                         I40E_INSET_FLEX_PAYLOAD,
9484                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9485                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9486                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9487                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9488                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9489                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9490                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9491                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9492                         I40E_INSET_FLEX_PAYLOAD,
9493                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9494                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9495                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9496                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9497                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9498                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9499                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9500                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9501                         I40E_INSET_FLEX_PAYLOAD,
9502                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9503                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9504                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9505                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9506                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9507                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9508                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9509                         I40E_INSET_FLEX_PAYLOAD,
9510                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9511                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9512                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9513                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9514                         I40E_INSET_FLEX_PAYLOAD,
9515         };
9516
9517         /**
9518          * Flow director supports only fields defined in
9519          * union rte_eth_fdir_flow.
9520          */
9521         static const uint64_t valid_fdir_inset_table[] = {
9522                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9523                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9524                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9525                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9526                 I40E_INSET_IPV4_TTL,
9527                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9528                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9529                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9530                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9531                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9532                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9533                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9534                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9535                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9536                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9537                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9538                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9539                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9540                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9541                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9542                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9543                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9544                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9545                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9546                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9547                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9548                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9549                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9550                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9551                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9552                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9553                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9554                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9555                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9556                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9557                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9558                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9559                 I40E_INSET_SCTP_VT,
9560                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9561                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9562                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9563                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9564                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9565                 I40E_INSET_IPV4_TTL,
9566                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9567                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9568                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9569                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9570                 I40E_INSET_IPV6_HOP_LIMIT,
9571                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9572                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9573                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9574                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9575                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9576                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9577                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9578                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9579                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9580                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9581                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9582                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9583                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9584                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9585                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9586                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9587                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9588                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9589                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9590                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9591                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9592                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9593                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9594                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9595                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9596                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9597                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9598                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9599                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9600                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9601                 I40E_INSET_SCTP_VT,
9602                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9603                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9604                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9605                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9606                 I40E_INSET_IPV6_HOP_LIMIT,
9607                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9608                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9609                 I40E_INSET_LAST_ETHER_TYPE,
9610         };
9611
9612         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9613                 return 0;
9614         if (filter == RTE_ETH_FILTER_HASH)
9615                 valid = valid_hash_inset_table[pctype];
9616         else
9617                 valid = valid_fdir_inset_table[pctype];
9618
9619         return valid;
9620 }
9621
9622 /**
9623  * Validate if the input set is allowed for a specific PCTYPE
9624  */
9625 int
9626 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9627                 enum rte_filter_type filter, uint64_t inset)
9628 {
9629         uint64_t valid;
9630
9631         valid = i40e_get_valid_input_set(pctype, filter);
9632         if (inset & (~valid))
9633                 return -EINVAL;
9634
9635         return 0;
9636 }
9637
9638 /* Default input set field combination per pctype */
9639 uint64_t
9640 i40e_get_default_input_set(uint16_t pctype)
9641 {
9642         static const uint64_t default_inset_table[] = {
9643                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9644                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9645                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9646                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9647                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9648                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9649                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9650                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9651                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9652                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9653                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9654                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9655                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9656                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9657                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9658                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9659                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9660                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9661                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9662                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9663                         I40E_INSET_SCTP_VT,
9664                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9665                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9666                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9667                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9668                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9669                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9670                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9671                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9672                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9673                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9674                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9675                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9676                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9677                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9678                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9679                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9680                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9681                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9682                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9683                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9684                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9685                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9686                         I40E_INSET_SCTP_VT,
9687                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9688                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9689                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9690                         I40E_INSET_LAST_ETHER_TYPE,
9691         };
9692
9693         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9694                 return 0;
9695
9696         return default_inset_table[pctype];
9697 }
9698
9699 /**
9700  * Parse the input set from index to logical bit masks
9701  */
9702 static int
9703 i40e_parse_input_set(uint64_t *inset,
9704                      enum i40e_filter_pctype pctype,
9705                      enum rte_eth_input_set_field *field,
9706                      uint16_t size)
9707 {
9708         uint16_t i, j;
9709         int ret = -EINVAL;
9710
9711         static const struct {
9712                 enum rte_eth_input_set_field field;
9713                 uint64_t inset;
9714         } inset_convert_table[] = {
9715                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9716                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9717                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9718                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9719                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9720                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9721                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9722                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9723                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9724                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9725                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9726                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9727                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9728                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9729                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9730                         I40E_INSET_IPV6_NEXT_HDR},
9731                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9732                         I40E_INSET_IPV6_HOP_LIMIT},
9733                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9734                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9735                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9736                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9737                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9738                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9739                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9740                         I40E_INSET_SCTP_VT},
9741                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9742                         I40E_INSET_TUNNEL_DMAC},
9743                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9744                         I40E_INSET_VLAN_TUNNEL},
9745                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9746                         I40E_INSET_TUNNEL_ID},
9747                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9748                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9749                         I40E_INSET_FLEX_PAYLOAD_W1},
9750                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9751                         I40E_INSET_FLEX_PAYLOAD_W2},
9752                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9753                         I40E_INSET_FLEX_PAYLOAD_W3},
9754                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9755                         I40E_INSET_FLEX_PAYLOAD_W4},
9756                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9757                         I40E_INSET_FLEX_PAYLOAD_W5},
9758                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9759                         I40E_INSET_FLEX_PAYLOAD_W6},
9760                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9761                         I40E_INSET_FLEX_PAYLOAD_W7},
9762                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9763                         I40E_INSET_FLEX_PAYLOAD_W8},
9764         };
9765
9766         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9767                 return ret;
9768
9769         /* Only a single item is allowed for DEFAULT or NONE */
9770         if (size == 1) {
9771                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9772                         *inset = i40e_get_default_input_set(pctype);
9773                         return 0;
9774                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9775                         *inset = I40E_INSET_NONE;
9776                         return 0;
9777                 }
9778         }
9779
9780         for (i = 0, *inset = 0; i < size; i++) {
9781                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9782                         if (field[i] == inset_convert_table[j].field) {
9783                                 *inset |= inset_convert_table[j].inset;
9784                                 break;
9785                         }
9786                 }
9787
9788                 /* An unsupported input set field was given; return immediately */
9789                 if (j == RTE_DIM(inset_convert_table))
9790                         return ret;
9791         }
9792
9793         return 0;
9794 }
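/*
 * Usage sketch (illustrative only, helper name hypothetical): parse a
 * classic UDP/IPv4 4-tuple into the driver's logical input-set bits.
 * The expected result is I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 * I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT.
 */
static __rte_unused int
i40e_example_parse_udp4_tuple(uint64_t *inset)
{
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L3_DST_IP4,
                RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT,
                RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
        };

        return i40e_parse_input_set(inset, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
                                    fields, RTE_DIM(fields));
}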
9795
9796 /**
9797  * Translate the input set from bit masks to register aware bit masks
9798  * and vice versa
9799  */
9800 uint64_t
9801 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9802 {
9803         uint64_t val = 0;
9804         uint16_t i;
9805
9806         struct inset_map {
9807                 uint64_t inset;
9808                 uint64_t inset_reg;
9809         };
9810
9811         static const struct inset_map inset_map_common[] = {
9812                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9813                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9814                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9815                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9816                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9817                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9818                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9819                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9820                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9821                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9822                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9823                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9824                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9825                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9826                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9827                 {I40E_INSET_TUNNEL_DMAC,
9828                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9829                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9830                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9831                 {I40E_INSET_TUNNEL_SRC_PORT,
9832                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9833                 {I40E_INSET_TUNNEL_DST_PORT,
9834                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9835                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9836                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9837                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9838                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9839                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9840                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9841                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9842                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9843                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9844         };
9845
9846         /* Some registers are mapped differently on the X722 */
9847         static const struct inset_map inset_map_diff_x722[] = {
9848                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9849                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9850                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9851                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9852         };
9853
9854         static const struct inset_map inset_map_diff_not_x722[] = {
9855                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9856                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9857                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9858                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9859         };
9860
9861         if (input == 0)
9862                 return val;
9863
9864         /* Translate input set to register aware inset */
9865         if (type == I40E_MAC_X722) {
9866                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9867                         if (input & inset_map_diff_x722[i].inset)
9868                                 val |= inset_map_diff_x722[i].inset_reg;
9869                 }
9870         } else {
9871                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9872                         if (input & inset_map_diff_not_x722[i].inset)
9873                                 val |= inset_map_diff_not_x722[i].inset_reg;
9874                 }
9875         }
9876
9877         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9878                 if (input & inset_map_common[i].inset)
9879                         val |= inset_map_common[i].inset_reg;
9880         }
9881
9882         return val;
9883 }
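/*
 * Illustrative sketch (helper name hypothetical): the same logical bit
 * translates to a different register-level inset depending on the MAC
 * type, which is exactly what the X722-specific tables above encode.
 */
static __rte_unused void
i40e_example_translate_ipv4_src(void)
{
        uint64_t generic = i40e_translate_input_set_reg(I40E_MAC_XL710,
                                                        I40E_INSET_IPV4_SRC);
        uint64_t x722 = i40e_translate_input_set_reg(I40E_MAC_X722,
                                                     I40E_INSET_IPV4_SRC);

        /* generic == I40E_REG_INSET_L3_SRC_IP4,
         * x722 == I40E_X722_REG_INSET_L3_SRC_IP4
         */
        RTE_SET_USED(generic);
        RTE_SET_USED(x722);
}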
9884
9885 int
9886 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9887 {
9888         uint8_t i, idx = 0;
9889         uint64_t inset_need_mask = inset;
9890
9891         static const struct {
9892                 uint64_t inset;
9893                 uint32_t mask;
9894         } inset_mask_map[] = {
9895                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9896                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9897                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9898                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9899                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9900                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9901                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9902                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9903         };
9904
9905         if (!inset || !mask || !nb_elem)
9906                 return 0;
9907
9908         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9909                 /* Clear the inset bit if no mask is required, e.g. when
9910                  * proto and ttl are selected together
9911                  */
9912                 if ((inset & inset_mask_map[i].inset) ==
9913                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9914                         inset_need_mask &= ~inset_mask_map[i].inset;
9915                 if (!inset_need_mask)
9916                         return 0;
9917         }
9918         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9919                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9920                     inset_mask_map[i].inset) {
9921                         if (idx >= nb_elem) {
9922                                 PMD_DRV_LOG(ERR, "Exceeded the maximal number of bitmasks");
9923                                 return -EINVAL;
9924                         }
9925                         mask[idx] = inset_mask_map[i].mask;
9926                         idx++;
9927                 }
9928         }
9929
9930         return idx;
9931 }
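/*
 * Illustrative sketch (helper name hypothetical): when PROTO and TTL
 * are selected together no field mask is needed and zero masks are
 * produced, while selecting PROTO alone yields exactly one mask entry.
 */
static __rte_unused void
i40e_example_inset_masks(void)
{
        uint32_t masks[I40E_INSET_MASK_NUM_REG] = {0};
        int num;

        num = i40e_generate_inset_mask_reg(I40E_INSET_IPV4_PROTO |
                                           I40E_INSET_IPV4_TTL,
                                           masks, I40E_INSET_MASK_NUM_REG);
        /* num == 0: the combined proto + ttl case needs no bitmask */

        num = i40e_generate_inset_mask_reg(I40E_INSET_IPV4_PROTO,
                                           masks, I40E_INSET_MASK_NUM_REG);
        /* num == 1: masks[0] == I40E_INSET_IPV4_PROTO_MASK */
        RTE_SET_USED(num);
}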
9932
9933 void
9934 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9935 {
9936         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9937
9938         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9939         if (reg != val)
9940                 i40e_write_rx_ctl(hw, addr, val);
9941         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9942                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9943 }
9944
9945 void
9946 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9947 {
9948         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9949         struct rte_eth_dev *dev;
9950
9951         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
9952         if (reg != val) {
9953                 i40e_write_rx_ctl(hw, addr, val);
9954                 PMD_DRV_LOG(WARNING,
9955                             "i40e device %s changed global register [0x%08x]."
9956                             " original: 0x%08x, new: 0x%08x",
9957                             dev->device->name, addr, reg,
9958                             (uint32_t)i40e_read_rx_ctl(hw, addr));
9959         }
9960 }
9961
9962 static void
9963 i40e_filter_input_set_init(struct i40e_pf *pf)
9964 {
9965         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9966         enum i40e_filter_pctype pctype;
9967         uint64_t input_set, inset_reg;
9968         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9969         int num, i;
9970         uint16_t flow_type;
9971
9972         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9973              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9974                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9975
9976                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9977                         continue;
9978
9979                 input_set = i40e_get_default_input_set(pctype);
9980
9981                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9982                                                    I40E_INSET_MASK_NUM_REG);
9983                 if (num < 0)
9984                         return;
9985                 if (pf->support_multi_driver && num > 0) {
9986                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9987                         return;
9988                 }
9989                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9990                                         input_set);
9991
9992                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9993                                       (uint32_t)(inset_reg & UINT32_MAX));
9994                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9995                                      (uint32_t)((inset_reg >>
9996                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9997                 if (!pf->support_multi_driver) {
9998                         i40e_check_write_global_reg(hw,
9999                                             I40E_GLQF_HASH_INSET(0, pctype),
10000                                             (uint32_t)(inset_reg & UINT32_MAX));
10001                         i40e_check_write_global_reg(hw,
10002                                              I40E_GLQF_HASH_INSET(1, pctype),
10003                                              (uint32_t)((inset_reg >>
10004                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
10005
10006                         for (i = 0; i < num; i++) {
10007                                 i40e_check_write_global_reg(hw,
10008                                                     I40E_GLQF_FD_MSK(i, pctype),
10009                                                     mask_reg[i]);
10010                                 i40e_check_write_global_reg(hw,
10011                                                   I40E_GLQF_HASH_MSK(i, pctype),
10012                                                   mask_reg[i]);
10013                         }
10014                         /* Clear unused mask registers of the pctype */
10015                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
10016                                 i40e_check_write_global_reg(hw,
10017                                                     I40E_GLQF_FD_MSK(i, pctype),
10018                                                     0);
10019                                 i40e_check_write_global_reg(hw,
10020                                                   I40E_GLQF_HASH_MSK(i, pctype),
10021                                                   0);
10022                         }
10023                 } else {
10024                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
10025                 }
10026                 I40E_WRITE_FLUSH(hw);
10027
10028                 /* store the default input set */
10029                 if (!pf->support_multi_driver)
10030                         pf->hash_input_set[pctype] = input_set;
10031                 pf->fdir.input_set[pctype] = input_set;
10032         }
10033 }
10034
10035 int
10036 i40e_hash_filter_inset_select(struct i40e_hw *hw,
10037                          struct rte_eth_input_set_conf *conf)
10038 {
10039         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
10040         enum i40e_filter_pctype pctype;
10041         uint64_t input_set, inset_reg = 0;
10042         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10043         int ret, i, num;
10044
10045         if (!conf) {
10046                 PMD_DRV_LOG(ERR, "Invalid pointer");
10047                 return -EFAULT;
10048         }
10049         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10050             conf->op != RTE_ETH_INPUT_SET_ADD) {
10051                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10052                 return -EINVAL;
10053         }
10054
10055         if (pf->support_multi_driver) {
10056                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
10057                 return -ENOTSUP;
10058         }
10059
10060         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10061         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10062                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
10063                 return -EINVAL;
10064         }
10065
10066         if (hw->mac.type == I40E_MAC_X722) {
10067                 /* Get the translated pctype value from the FD pctype register */
10068                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
10069                         I40E_GLQF_FD_PCTYPES((int)pctype));
10070         }
10071
10072         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10073                                    conf->inset_size);
10074         if (ret) {
10075                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10076                 return -EINVAL;
10077         }
10078
10079         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
10080                 /* get inset value in register */
10081                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
10082                 inset_reg <<= I40E_32_BIT_WIDTH;
10083                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
10084                 input_set |= pf->hash_input_set[pctype];
10085         }
10086         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10087                                            I40E_INSET_MASK_NUM_REG);
10088         if (num < 0)
10089                 return -EINVAL;
10090
10091         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10092
10093         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
10094                                     (uint32_t)(inset_reg & UINT32_MAX));
10095         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
10096                                     (uint32_t)((inset_reg >>
10097                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
10098
10099         for (i = 0; i < num; i++)
10100                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10101                                             mask_reg[i]);
10102         /* Clear unused mask registers of the pctype */
10103         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10104                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10105                                             0);
10106         I40E_WRITE_FLUSH(hw);
10107
10108         pf->hash_input_set[pctype] = input_set;
10109         return 0;
10110 }
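/*
 * Usage sketch (illustrative only, helper name hypothetical): restrict
 * the RSS hash input of IPv4/UDP flows to the source address only,
 * through the hash filter-ctrl path handled further below.
 */
static __rte_unused int
i40e_example_hash_inset_src_only(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
        info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
        info.info.input_set_conf.inset_size = 1;
        info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}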
10111
10112 int
10113 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
10114                          struct rte_eth_input_set_conf *conf)
10115 {
10116         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10117         enum i40e_filter_pctype pctype;
10118         uint64_t input_set, inset_reg = 0;
10119         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10120         int ret, i, num;
10121
10122         if (!hw || !conf) {
10123                 PMD_DRV_LOG(ERR, "Invalid pointer");
10124                 return -EFAULT;
10125         }
10126         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10127             conf->op != RTE_ETH_INPUT_SET_ADD) {
10128                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10129                 return -EINVAL;
10130         }
10131
10132         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10133
10134         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10135                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
10136                 return -EINVAL;
10137         }
10138
10139         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10140                                    conf->inset_size);
10141         if (ret) {
10142                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10143                 return -EINVAL;
10144         }
10145
10146         /* get inset value in register */
10147         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
10148         inset_reg <<= I40E_32_BIT_WIDTH;
10149         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
10150
10151         /* Cannot change the inset register for FDIR flex payload here;
10152          * it is done by writing I40E_PRTQF_FD_FLXINSET
10153          * in i40e_set_flex_mask_on_pctype().
10154          */
10155         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
10156                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
10157         else
10158                 input_set |= pf->fdir.input_set[pctype];
10159         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10160                                            I40E_INSET_MASK_NUM_REG);
10161         if (num < 0)
10162                 return -EINVAL;
10163         if (pf->support_multi_driver && num > 0) {
10164                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10165                 return -ENOTSUP;
10166         }
10167
10168         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10169
10170         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
10171                               (uint32_t)(inset_reg & UINT32_MAX));
10172         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
10173                              (uint32_t)((inset_reg >>
10174                              I40E_32_BIT_WIDTH) & UINT32_MAX));
10175
10176         if (!pf->support_multi_driver) {
10177                 for (i = 0; i < num; i++)
10178                         i40e_check_write_global_reg(hw,
10179                                                     I40E_GLQF_FD_MSK(i, pctype),
10180                                                     mask_reg[i]);
10181                 /* Clear unused mask registers of the pctype */
10182                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10183                         i40e_check_write_global_reg(hw,
10184                                                     I40E_GLQF_FD_MSK(i, pctype),
10185                                                     0);
10186         } else {
10187                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10188         }
10189         I40E_WRITE_FLUSH(hw);
10190
10191         pf->fdir.input_set[pctype] = input_set;
10192         return 0;
10193 }
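/*
 * Usage sketch (illustrative only, helper name hypothetical; assumes
 * the caller already holds the PF structure): extend the default FDIR
 * input set of IPv4/UDP flows with the IPv4 TOS byte.
 */
static __rte_unused int
i40e_example_fdir_inset_add_tos(struct i40e_pf *pf)
{
        struct rte_eth_input_set_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        conf.field[0] = RTE_ETH_INPUT_SET_L3_IP4_TOS;
        conf.inset_size = 1;
        conf.op = RTE_ETH_INPUT_SET_ADD;
        return i40e_fdir_filter_inset_select(pf, &conf);
}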
10194
10195 static int
10196 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10197 {
10198         int ret = 0;
10199
10200         if (!hw || !info) {
10201                 PMD_DRV_LOG(ERR, "Invalid pointer");
10202                 return -EFAULT;
10203         }
10204
10205         switch (info->info_type) {
10206         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10207                 i40e_get_symmetric_hash_enable_per_port(hw,
10208                                         &(info->info.enable));
10209                 break;
10210         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10211                 ret = i40e_get_hash_filter_global_config(hw,
10212                                 &(info->info.global_conf));
10213                 break;
10214         default:
10215                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10216                                                         info->info_type);
10217                 ret = -EINVAL;
10218                 break;
10219         }
10220
10221         return ret;
10222 }
10223
10224 static int
10225 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10226 {
10227         int ret = 0;
10228
10229         if (!hw || !info) {
10230                 PMD_DRV_LOG(ERR, "Invalid pointer");
10231                 return -EFAULT;
10232         }
10233
10234         switch (info->info_type) {
10235         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10236                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
10237                 break;
10238         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10239                 ret = i40e_set_hash_filter_global_config(hw,
10240                                 &(info->info.global_conf));
10241                 break;
10242         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
10243                 ret = i40e_hash_filter_inset_select(hw,
10244                                                &(info->info.input_set_conf));
10245                 break;
10246
10247         default:
10248                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10249                                                         info->info_type);
10250                 ret = -EINVAL;
10251                 break;
10252         }
10253
10254         return ret;
10255 }
10256
10257 /* Operations for hash function */
10258 static int
10259 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
10260                       enum rte_filter_op filter_op,
10261                       void *arg)
10262 {
10263         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10264         int ret = 0;
10265
10266         switch (filter_op) {
10267         case RTE_ETH_FILTER_NOP:
10268                 break;
10269         case RTE_ETH_FILTER_GET:
10270                 ret = i40e_hash_filter_get(hw,
10271                         (struct rte_eth_hash_filter_info *)arg);
10272                 break;
10273         case RTE_ETH_FILTER_SET:
10274                 ret = i40e_hash_filter_set(hw,
10275                         (struct rte_eth_hash_filter_info *)arg);
10276                 break;
10277         default:
10278                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
10279                                                                 filter_op);
10280                 ret = -ENOTSUP;
10281                 break;
10282         }
10283
10284         return ret;
10285 }
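
/*
 * Illustrative usage sketch (not part of the driver): the hash filter
 * ops above are reached through the legacy filter-control API. A
 * hypothetical application enabling symmetric hashing on an already
 * configured i40e port (port_id is an assumption) could look like:
 *
 *     struct rte_eth_hash_filter_info info = {
 *             .info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
 *             .info.enable = 1,
 *     };
 *     int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                                       RTE_ETH_FILTER_SET, &info);
 *
 * rte_eth_dev_filter_ctrl() dispatches to i40e_dev_filter_ctrl() below,
 * which routes RTE_ETH_FILTER_HASH to i40e_hash_filter_ctrl().
 */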
10286
10287 /* Convert ethertype filter structure */
10288 static int
10289 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
10290                               struct i40e_ethertype_filter *filter)
10291 {
10292         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
10293                 RTE_ETHER_ADDR_LEN);
10294         filter->input.ether_type = input->ether_type;
10295         filter->flags = input->flags;
10296         filter->queue = input->queue;
10297
10298         return 0;
10299 }
10300
10301 /* Check if the ethertype filter already exists */
10302 struct i40e_ethertype_filter *
10303 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
10304                                 const struct i40e_ethertype_filter_input *input)
10305 {
10306         int ret;
10307
10308         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
10309         if (ret < 0)
10310                 return NULL;
10311
10312         return ethertype_rule->hash_map[ret];
10313 }
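
/*
 * A note on the pattern above (illustrative, not driver code):
 * rte_hash_lookup() returns a non-negative key index on a hit, and that
 * index doubles as the slot into the rule's hash_map[] array. The
 * insert path below mirrors this, e.g.:
 *
 *     int idx = rte_hash_add_key(rule->hash_table, &filter->input);
 *     if (idx >= 0)
 *             rule->hash_map[idx] = filter;
 */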
10314
10315 /* Add ethertype filter in SW list */
10316 static int
10317 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
10318                                 struct i40e_ethertype_filter *filter)
10319 {
10320         struct i40e_ethertype_rule *rule = &pf->ethertype;
10321         int ret;
10322
10323         ret = rte_hash_add_key(rule->hash_table, &filter->input);
10324         if (ret < 0) {
10325                 PMD_DRV_LOG(ERR,
10326                             "Failed to insert ethertype filter"
10327                             " into hash table: %d!",
10328                             ret);
10329                 return ret;
10330         }
10331         rule->hash_map[ret] = filter;
10332
10333         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
10334
10335         return 0;
10336 }
10337
10338 /* Delete ethertype filter in SW list */
10339 int
10340 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
10341                              struct i40e_ethertype_filter_input *input)
10342 {
10343         struct i40e_ethertype_rule *rule = &pf->ethertype;
10344         struct i40e_ethertype_filter *filter;
10345         int ret;
10346
10347         ret = rte_hash_del_key(rule->hash_table, input);
10348         if (ret < 0) {
10349                 PMD_DRV_LOG(ERR,
10350                             "Failed to delete ethertype filter"
10351                             " from hash table: %d!",
10352                             ret);
10353                 return ret;
10354         }
10355         filter = rule->hash_map[ret];
10356         rule->hash_map[ret] = NULL;
10357
10358         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
10359         rte_free(filter);
10360
10361         return 0;
10362 }
10363
10364 /*
10365  * Configure an ethertype filter, which can direct packets by filtering
10366  * on MAC address and ether_type, or on ether_type alone.
10367  */
10368 int
10369 i40e_ethertype_filter_set(struct i40e_pf *pf,
10370                         struct rte_eth_ethertype_filter *filter,
10371                         bool add)
10372 {
10373         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10374         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
10375         struct i40e_ethertype_filter *ethertype_filter, *node;
10376         struct i40e_ethertype_filter check_filter;
10377         struct i40e_control_filter_stats stats;
10378         uint16_t flags = 0;
10379         int ret;
10380
10381         if (filter->queue >= pf->dev_data->nb_rx_queues) {
10382                 PMD_DRV_LOG(ERR, "Invalid queue ID");
10383                 return -EINVAL;
10384         }
10385         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
10386                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
10387                 PMD_DRV_LOG(ERR,
10388                         "unsupported ether_type(0x%04x) in control packet filter.",
10389                         filter->ether_type);
10390                 return -EINVAL;
10391         }
10392         if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
10393                 PMD_DRV_LOG(WARNING,
10394                         "filter vlan ether_type in first tag is not supported.");
10395
10396         /* Check if the filter already exists in the SW list */
10397         memset(&check_filter, 0, sizeof(check_filter));
10398         i40e_ethertype_filter_convert(filter, &check_filter);
10399         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
10400                                                &check_filter.input);
10401         if (add && node) {
10402                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
10403                 return -EINVAL;
10404         }
10405
10406         if (!add && !node) {
10407                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
10408                 return -EINVAL;
10409         }
10410
10411         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
10412                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10413         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
10414                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10415         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10416
10417         memset(&stats, 0, sizeof(stats));
10418         ret = i40e_aq_add_rem_control_packet_filter(hw,
10419                         filter->mac_addr.addr_bytes,
10420                         filter->ether_type, flags,
10421                         pf->main_vsi->seid,
10422                         filter->queue, add, &stats, NULL);
10423
10424         PMD_DRV_LOG(INFO,
10425                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
10426                 ret, stats.mac_etype_used, stats.etype_used,
10427                 stats.mac_etype_free, stats.etype_free);
10428         if (ret < 0)
10429                 return -ENOSYS;
10430
10431         /* Add or delete a filter in SW list */
10432         if (add) {
10433                 ethertype_filter = rte_zmalloc("ethertype_filter",
10434                                        sizeof(*ethertype_filter), 0);
10435                 if (ethertype_filter == NULL) {
10436                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
10437                         return -ENOMEM;
10438                 }
10439
10440                 rte_memcpy(ethertype_filter, &check_filter,
10441                            sizeof(check_filter));
10442                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
10443                 if (ret < 0)
10444                         rte_free(ethertype_filter);
10445         } else {
10446                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
10447         }
10448
10449         return ret;
10450 }
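
/*
 * Illustrative usage sketch (not part of the driver): adding a drop
 * filter for a hypothetical ether_type 0x88B5 through this function
 * could look as follows (pf is assumed to be a valid PF instance):
 *
 *     struct rte_eth_ethertype_filter f = {
 *             .ether_type = 0x88B5,
 *             .flags = RTE_ETHTYPE_FLAGS_DROP,
 *             .queue = 0,
 *     };
 *     ret = i40e_ethertype_filter_set(pf, &f, TRUE);
 *
 * IPv4/IPv6 ether_types are rejected above, and without
 * RTE_ETHTYPE_FLAGS_MAC the MAC address is ignored by the hardware.
 */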
10451
10452 /*
10453  * Handle operations for ethertype filter.
10454  */
10455 static int
10456 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
10457                                 enum rte_filter_op filter_op,
10458                                 void *arg)
10459 {
10460         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10461         int ret = 0;
10462
10463         if (filter_op == RTE_ETH_FILTER_NOP)
10464                 return ret;
10465
10466         if (arg == NULL) {
10467                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
10468                             filter_op);
10469                 return -EINVAL;
10470         }
10471
10472         switch (filter_op) {
10473         case RTE_ETH_FILTER_ADD:
10474                 ret = i40e_ethertype_filter_set(pf,
10475                         (struct rte_eth_ethertype_filter *)arg,
10476                         TRUE);
10477                 break;
10478         case RTE_ETH_FILTER_DELETE:
10479                 ret = i40e_ethertype_filter_set(pf,
10480                         (struct rte_eth_ethertype_filter *)arg,
10481                         FALSE);
10482                 break;
10483         default:
10484                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
10485                 ret = -ENOSYS;
10486                 break;
10487         }
10488         return ret;
10489 }
10490
10491 static int
10492 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
10493                      enum rte_filter_type filter_type,
10494                      enum rte_filter_op filter_op,
10495                      void *arg)
10496 {
10497         int ret = 0;
10498
10499         if (dev == NULL)
10500                 return -EINVAL;
10501
10502         switch (filter_type) {
10503         case RTE_ETH_FILTER_NONE:
10504                 /* For global configuration */
10505                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
10506                 break;
10507         case RTE_ETH_FILTER_HASH:
10508                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
10509                 break;
10510         case RTE_ETH_FILTER_ETHERTYPE:
10511                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
10512                 break;
10513         case RTE_ETH_FILTER_TUNNEL:
10514                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
10515                 break;
10516         case RTE_ETH_FILTER_FDIR:
10517                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
10518                 break;
10519         case RTE_ETH_FILTER_GENERIC:
10520                 if (filter_op != RTE_ETH_FILTER_GET)
10521                         return -EINVAL;
10522                 *(const void **)arg = &i40e_flow_ops;
10523                 break;
10524         default:
10525                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
10526                                                         filter_type);
10527                 ret = -EINVAL;
10528                 break;
10529         }
10530
10531         return ret;
10532 }
10533
10534 /*
10535  * Check and enable Extended Tag.
10536  * Enabling Extended Tag is important for 40G performance.
10537  */
10538 static void
10539 i40e_enable_extended_tag(struct rte_eth_dev *dev)
10540 {
10541         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10542         uint32_t buf = 0;
10543         int ret;
10544
10545         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10546                                       PCI_DEV_CAP_REG);
10547         if (ret < 0) {
10548                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10549                             PCI_DEV_CAP_REG);
10550                 return;
10551         }
10552         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
10553                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
10554                 return;
10555         }
10556
10557         buf = 0;
10558         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10559                                       PCI_DEV_CTRL_REG);
10560         if (ret < 0) {
10561                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10562                             PCI_DEV_CTRL_REG);
10563                 return;
10564         }
10565         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
10566                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
10567                 return;
10568         }
10569         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
10570         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
10571                                        PCI_DEV_CTRL_REG);
10572         if (ret < 0) {
10573                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
10574                             PCI_DEV_CTRL_REG);
10575                 return;
10576         }
10577 }
10578
10579 /*
10580  * As some registers won't be reset without a global hardware reset,
10581  * hardware initialization is needed to put those registers into an
10582  * expected initial state.
10583  */
10584 static void
10585 i40e_hw_init(struct rte_eth_dev *dev)
10586 {
10587         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10588
10589         i40e_enable_extended_tag(dev);
10590
10591         /* clear the PF Queue Filter control register */
10592         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
10593
10594         /* Disable symmetric hash per port */
10595         i40e_set_symmetric_hash_enable_per_port(hw, 0);
10596 }
10597
10598 /*
10599  * For X722 it is possible to have multiple pctypes mapped to the same flowtype;
10600  * however, this function will return only the highest pctype index,
10601  * which is not quite correct. This is a known problem of the i40e driver
10602  * and needs to be fixed later.
10603  */
10604 enum i40e_filter_pctype
10605 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
10606 {
10607         int i;
10608         uint64_t pctype_mask;
10609
10610         if (flow_type < I40E_FLOW_TYPE_MAX) {
10611                 pctype_mask = adapter->pctypes_tbl[flow_type];
10612                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10613                         if (pctype_mask & (1ULL << i))
10614                                 return (enum i40e_filter_pctype)i;
10615                 }
10616         }
10617         return I40E_FILTER_PCTYPE_INVALID;
10618 }
10619
10620 uint16_t
10621 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10622                         enum i40e_filter_pctype pctype)
10623 {
10624         uint16_t flowtype;
10625         uint64_t pctype_mask = 1ULL << pctype;
10626
10627         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10628              flowtype++) {
10629                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10630                         return flowtype;
10631         }
10632
10633         return RTE_ETH_FLOW_UNKNOWN;
10634 }
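
/*
 * Round-trip sketch (illustrative, not driver code): the two helpers
 * above map between DPDK flow types and hardware packet classifier
 * types through adapter->pctypes_tbl, e.g.:
 *
 *     enum i40e_filter_pctype pctype =
 *             i40e_flowtype_to_pctype(adapter, RTE_ETH_FLOW_NONFRAG_IPV4_UDP);
 *     uint16_t flowtype = i40e_pctype_to_flowtype(adapter, pctype);
 *
 * flowtype is RTE_ETH_FLOW_NONFRAG_IPV4_UDP again, unless several
 * pctypes share the flow type (the X722 case noted above), in which
 * case only the highest pctype is returned by the forward mapping.
 */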
10635
10636 /*
10637  * On X710, performance numbers are far below expectation on recent firmware
10638  * versions. On XL710, performance numbers are also far below expectation on
10639  * recent firmware versions if promiscuous mode is disabled, or if promiscuous
10640  * mode is enabled and the port MAC address equals the packet destination MAC
10641  * address. The fix for this issue may not be integrated in the following
10642  * firmware version, so the workaround in the software driver is needed. It
10643  * modifies the initial values of 3 internal-only registers for both X710 and
10644  * XL710. Note that the values for X710 and XL710 could be different, and the
10645  * workaround can be removed when it is fixed in firmware in the future.
10646  */
10647
10648 /* For both X710 and XL710 */
10649 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10650 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10651 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10652
10653 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10654 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10655
10656 /* For X722 */
10657 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10658 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10659
10660 /* For X710 */
10661 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10662 /* For XL710 */
10663 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10664 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10665
10666 /*
10667  * GL_SWR_PM_UP_THR:
10668  * The value is not impacted by the link speed; it is set according
10669  * to the total number of ports for a better pipe-monitor configuration.
10670  */
10671 static bool
10672 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10673 {
10674 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10675                 .device_id = (dev),   \
10676                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10677
10678 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10679                 .device_id = (dev),   \
10680                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10681
10682         static const struct {
10683                 uint16_t device_id;
10684                 uint32_t val;
10685         } swr_pm_table[] = {
10686                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10687                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10688                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10689                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10690                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10691
10692                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10693                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10694                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10695                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10696                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10697                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10698                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10699         };
10700         uint32_t i;
10701
10702         if (value == NULL) {
10703                 PMD_DRV_LOG(ERR, "value is NULL");
10704                 return false;
10705         }
10706
10707         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10708                 if (hw->device_id == swr_pm_table[i].device_id) {
10709                         *value = swr_pm_table[i].val;
10710
10711                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10712                                     "value - 0x%08x",
10713                                     hw->device_id, *value);
10714                         return true;
10715                 }
10716         }
10717
10718         return false;
10719 }
10720
10721 static int
10722 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10723 {
10724         enum i40e_status_code status;
10725         struct i40e_aq_get_phy_abilities_resp phy_ab;
10726         int ret = -ENOTSUP;
10727         int retries = 0;
10728
10729         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10730                                               NULL);
10731
10732         while (status) {
10733                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10734                         status);
10735                 retries++;
10736                 rte_delay_us(100000);
10737                 if (retries < 5)
10738                         status = i40e_aq_get_phy_capabilities(hw, false,
10739                                         true, &phy_ab, NULL);
10740                 else
10741                         return ret;
10742         }
10743         return 0;
10744 }
10745
10746 static void
10747 i40e_configure_registers(struct i40e_hw *hw)
10748 {
10749         static struct {
10750                 uint32_t addr;
10751                 uint64_t val;
10752         } reg_table[] = {
10753                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10754                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10755                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10756         };
10757         uint64_t reg;
10758         uint32_t i;
10759         int ret;
10760
10761         for (i = 0; i < RTE_DIM(reg_table); i++) {
10762                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10763                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10764                                 reg_table[i].val =
10765                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10766                         else /* For X710/XL710/XXV710 */
10767                                 if (hw->aq.fw_maj_ver < 6)
10768                                         reg_table[i].val =
10769                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10770                                 else
10771                                         reg_table[i].val =
10772                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10773                 }
10774
10775                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10776                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10777                                 reg_table[i].val =
10778                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10779                         else /* For X710/XL710/XXV710 */
10780                                 reg_table[i].val =
10781                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10782                 }
10783
10784                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10785                         uint32_t cfg_val;
10786
10787                         if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10788                                 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10789                                             "GL_SWR_PM_UP_THR value fixup",
10790                                             hw->device_id);
10791                                 continue;
10792                         }
10793
10794                         reg_table[i].val = cfg_val;
10795                 }
10796
10797                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10798                                                         &reg, NULL);
10799                 if (ret < 0) {
10800                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10801                                                         reg_table[i].addr);
10802                         break;
10803                 }
10804                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10805                                                 reg_table[i].addr, reg);
10806                 if (reg == reg_table[i].val)
10807                         continue;
10808
10809                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10810                                                 reg_table[i].val, NULL);
10811                 if (ret < 0) {
10812                         PMD_DRV_LOG(ERR,
10813                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10814                                 reg_table[i].val, reg_table[i].addr);
10815                         break;
10816                 }
10817                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10818                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10819         }
10820 }
10821
10822 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10823 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10824 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10825 static int
10826 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10827 {
10828         uint32_t reg;
10829         int ret;
10830
10831         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10832                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10833                 return -EINVAL;
10834         }
10835
10836         /* Configure for double VLAN RX stripping */
10837         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10838         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10839                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10840                 ret = i40e_aq_debug_write_register(hw,
10841                                                    I40E_VSI_TSR(vsi->vsi_id),
10842                                                    reg, NULL);
10843                 if (ret < 0) {
10844                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10845                                     vsi->vsi_id);
10846                         return I40E_ERR_CONFIG;
10847                 }
10848         }
10849
10850         /* Configure for double VLAN TX insertion */
10851         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10852         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10853                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10854                 ret = i40e_aq_debug_write_register(hw,
10855                                                    I40E_VSI_L2TAGSTXVALID(
10856                                                    vsi->vsi_id), reg, NULL);
10857                 if (ret < 0) {
10858                         PMD_DRV_LOG(ERR,
10859                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10860                                 vsi->vsi_id);
10861                         return I40E_ERR_CONFIG;
10862                 }
10863         }
10864
10865         return 0;
10866 }
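
/*
 * Illustrative usage sketch (an assumption, not part of the driver):
 * an application typically reaches this double-VLAN configuration by
 * enabling extended VLAN offload through the generic ethdev API, e.g.:
 *
 *     ret = rte_eth_dev_set_vlan_offload(port_id,
 *                     ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD);
 *
 * port_id is assumed to identify a configured i40e port; the call
 * returns 0 on success.
 */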
10867
10868 /**
10869  * i40e_aq_add_mirror_rule
10870  * @hw: pointer to the hardware structure
10871  * @seid: VEB seid to add mirror rule to
10872  * @dst_id: destination VSI seid
10873  * @entries: Buffer which contains the entities to be mirrored
10874  * @count: number of entities contained in the buffer
10875  * @rule_id: the rule_id of the rule to be added
10876  *
10877  * Add a mirror rule for a given VEB.
10878  *
10879  **/
10880 static enum i40e_status_code
10881 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10882                         uint16_t seid, uint16_t dst_id,
10883                         uint16_t rule_type, uint16_t *entries,
10884                         uint16_t count, uint16_t *rule_id)
10885 {
10886         struct i40e_aq_desc desc;
10887         struct i40e_aqc_add_delete_mirror_rule cmd;
10888         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10889                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10890                 &desc.params.raw;
10891         uint16_t buff_len;
10892         enum i40e_status_code status;
10893
10894         i40e_fill_default_direct_cmd_desc(&desc,
10895                                           i40e_aqc_opc_add_mirror_rule);
10896         memset(&cmd, 0, sizeof(cmd));
10897
10898         buff_len = sizeof(uint16_t) * count;
10899         desc.datalen = rte_cpu_to_le_16(buff_len);
10900         if (buff_len > 0)
10901                 desc.flags |= rte_cpu_to_le_16(
10902                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10903         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10904                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10905         cmd.num_entries = rte_cpu_to_le_16(count);
10906         cmd.seid = rte_cpu_to_le_16(seid);
10907         cmd.destination = rte_cpu_to_le_16(dst_id);
10908
10909         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10910         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10911         PMD_DRV_LOG(INFO,
10912                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10913                 hw->aq.asq_last_status, resp->rule_id,
10914                 resp->mirror_rules_used, resp->mirror_rules_free);
10915         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10916
10917         return status;
10918 }
10919
10920 /**
10921  * i40e_aq_del_mirror_rule
10922  * @hw: pointer to the hardware structure
10923  * @seid: VEB seid to delete the mirror rule from
10924  * @entries: Buffer which contains the entities to be mirrored
10925  * @count: number of entities contained in the buffer
10926  * @rule_id: the rule_id of the rule to be deleted
10927  *
10928  * Delete a mirror rule for a given VEB.
10929  *
10930  **/
10931 static enum i40e_status_code
10932 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10933                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10934                 uint16_t count, uint16_t rule_id)
10935 {
10936         struct i40e_aq_desc desc;
10937         struct i40e_aqc_add_delete_mirror_rule cmd;
10938         uint16_t buff_len = 0;
10939         enum i40e_status_code status;
10940         void *buff = NULL;
10941
10942         i40e_fill_default_direct_cmd_desc(&desc,
10943                                           i40e_aqc_opc_delete_mirror_rule);
10944         memset(&cmd, 0, sizeof(cmd));
10945         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10946                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10947                                                           I40E_AQ_FLAG_RD));
10948                 cmd.num_entries = count;
10949                 buff_len = sizeof(uint16_t) * count;
10950                 desc.datalen = rte_cpu_to_le_16(buff_len);
10951                 buff = (void *)entries;
10952         } else
10953                 /* rule id is filled in destination field for deleting mirror rule */
10954                 cmd.destination = rte_cpu_to_le_16(rule_id);
10955
10956         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10957                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10958         cmd.seid = rte_cpu_to_le_16(seid);
10959
10960         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10961         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10962
10963         return status;
10964 }
10965
10966 /**
10967  * i40e_mirror_rule_set
10968  * @dev: pointer to the device structure
10969  * @mirror_conf: mirror rule info
10970  * @sw_id: mirror rule's sw_id
10971  * @on: enable/disable
10972  *
10973  * Set a mirror rule.
10974  *
10975  **/
10976 static int
10977 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10978                         struct rte_eth_mirror_conf *mirror_conf,
10979                         uint8_t sw_id, uint8_t on)
10980 {
10981         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10982         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10983         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10984         struct i40e_mirror_rule *parent = NULL;
10985         uint16_t seid, dst_seid, rule_id;
10986         uint16_t i, j = 0;
10987         int ret;
10988
10989         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10990
10991         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10992                 PMD_DRV_LOG(ERR,
10993                         "mirror rule cannot be configured without a VEB or VFs.");
10994                 return -ENOSYS;
10995         }
10996         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
10997                 PMD_DRV_LOG(ERR, "mirror table is full.");
10998                 return -ENOSPC;
10999         }
11000         if (mirror_conf->dst_pool > pf->vf_num) {
11001                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
11002                                  mirror_conf->dst_pool);
11003                 return -EINVAL;
11004         }
11005
11006         seid = pf->main_vsi->veb->seid;
11007
11008         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11009                 if (sw_id <= it->index) {
11010                         mirr_rule = it;
11011                         break;
11012                 }
11013                 parent = it;
11014         }
11015         if (mirr_rule && sw_id == mirr_rule->index) {
11016                 if (on) {
11017                         PMD_DRV_LOG(ERR, "mirror rule exists.");
11018                         return -EEXIST;
11019                 } else {
11020                         ret = i40e_aq_del_mirror_rule(hw, seid,
11021                                         mirr_rule->rule_type,
11022                                         mirr_rule->entries,
11023                                         mirr_rule->num_entries, mirr_rule->id);
11024                         if (ret < 0) {
11025                                 PMD_DRV_LOG(ERR,
11026                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
11027                                         ret, hw->aq.asq_last_status);
11028                                 return -ENOSYS;
11029                         }
11030                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11031                         rte_free(mirr_rule);
11032                         pf->nb_mirror_rule--;
11033                         return 0;
11034                 }
11035         } else if (!on) {
11036                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11037                 return -ENOENT;
11038         }
11039
11040         mirr_rule = rte_zmalloc("i40e_mirror_rule",
11041                                 sizeof(struct i40e_mirror_rule), 0);
11042         if (!mirr_rule) {
11043                 PMD_DRV_LOG(ERR, "failed to allocate memory");
11044                 return I40E_ERR_NO_MEMORY;
11045         }
11046         switch (mirror_conf->rule_type) {
11047         case ETH_MIRROR_VLAN:
11048                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
11049                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
11050                                 mirr_rule->entries[j] =
11051                                         mirror_conf->vlan.vlan_id[i];
11052                                 j++;
11053                         }
11054                 }
11055                 if (j == 0) {
11056                         PMD_DRV_LOG(ERR, "vlan is not specified.");
11057                         rte_free(mirr_rule);
11058                         return -EINVAL;
11059                 }
11060                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
11061                 break;
11062         case ETH_MIRROR_VIRTUAL_POOL_UP:
11063         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
11064                 /* check if the specified pool bit is out of range */
11065                 if (mirror_conf->pool_mask >= (uint64_t)(1ULL << (pf->vf_num + 1))) {
11066                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
11067                         rte_free(mirr_rule);
11068                         return -EINVAL;
11069                 }
11070                 for (i = 0, j = 0; i < pf->vf_num; i++) {
11071                         if (mirror_conf->pool_mask & (1ULL << i)) {
11072                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
11073                                 j++;
11074                         }
11075                 }
11076                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
11077                         /* add pf vsi to entries */
11078                         mirr_rule->entries[j] = pf->main_vsi_seid;
11079                         j++;
11080                 }
11081                 if (j == 0) {
11082                         PMD_DRV_LOG(ERR, "pool is not specified.");
11083                         rte_free(mirr_rule);
11084                         return -EINVAL;
11085                 }
11086                 /* Egress and ingress in AQ commands mean from the switch, not the port */
11087                 mirr_rule->rule_type =
11088                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
11089                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
11090                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
11091                 break;
11092         case ETH_MIRROR_UPLINK_PORT:
11093                 /* Egress and ingress in AQ commands mean from the switch, not the port */
11094                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
11095                 break;
11096         case ETH_MIRROR_DOWNLINK_PORT:
11097                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
11098                 break;
11099         default:
11100                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
11101                         mirror_conf->rule_type);
11102                 rte_free(mirr_rule);
11103                 return -EINVAL;
11104         }
11105
11106         /* If the dst_pool is equal to vf_num, consider it as PF */
11107         if (mirror_conf->dst_pool == pf->vf_num)
11108                 dst_seid = pf->main_vsi_seid;
11109         else
11110                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
11111
11112         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
11113                                       mirr_rule->rule_type, mirr_rule->entries,
11114                                       j, &rule_id);
11115         if (ret < 0) {
11116                 PMD_DRV_LOG(ERR,
11117                         "failed to add mirror rule: ret = %d, aq_err = %d.",
11118                         ret, hw->aq.asq_last_status);
11119                 rte_free(mirr_rule);
11120                 return -ENOSYS;
11121         }
11122
11123         mirr_rule->index = sw_id;
11124         mirr_rule->num_entries = j;
11125         mirr_rule->id = rule_id;
11126         mirr_rule->dst_vsi_seid = dst_seid;
11127
11128         if (parent)
11129                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
11130         else
11131                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
11132
11133         pf->nb_mirror_rule++;
11134         return 0;
11135 }
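
/*
 * Illustrative usage sketch (not part of the driver): this function
 * backs the legacy ethdev mirroring API. Mirroring VLAN 100 to pool 0
 * on a hypothetical port could look like:
 *
 *     struct rte_eth_mirror_conf conf = {
 *             .rule_type = ETH_MIRROR_VLAN,
 *             .dst_pool = 0,
 *             .vlan = { .vlan_mask = 1ULL, .vlan_id = { 100 } },
 *     };
 *     ret = rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
 *
 * The rule can only be programmed when a VEB exists and VFs are
 * configured, as checked at the top of i40e_mirror_rule_set().
 */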
11136
11137 /**
11138  * i40e_mirror_rule_reset
11139  * @dev: pointer to the device
11140  * @sw_id: mirror rule's sw_id
11141  *
11142  * Reset (delete) a mirror rule.
11143  *
11144  **/
11145 static int
11146 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
11147 {
11148         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11149         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11150         struct i40e_mirror_rule *it, *mirr_rule = NULL;
11151         uint16_t seid;
11152         int ret;
11153
11154         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
11155
11156         seid = pf->main_vsi->veb->seid;
11157
11158         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11159                 if (sw_id == it->index) {
11160                         mirr_rule = it;
11161                         break;
11162                 }
11163         }
11164         if (mirr_rule) {
11165                 ret = i40e_aq_del_mirror_rule(hw, seid,
11166                                 mirr_rule->rule_type,
11167                                 mirr_rule->entries,
11168                                 mirr_rule->num_entries, mirr_rule->id);
11169                 if (ret < 0) {
11170                         PMD_DRV_LOG(ERR,
11171                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
11172                                 ret, hw->aq.asq_last_status);
11173                         return -ENOSYS;
11174                 }
11175                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11176                 rte_free(mirr_rule);
11177                 pf->nb_mirror_rule--;
11178         } else {
11179                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11180                 return -ENOENT;
11181         }
11182         return 0;
11183 }
11184
11185 static uint64_t
11186 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
11187 {
11188         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11189         uint64_t systim_cycles;
11190
11191         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
11192         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
11193                         << 32;
11194
11195         return systim_cycles;
11196 }
11197
11198 static uint64_t
11199 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
11200 {
11201         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11202         uint64_t rx_tstamp;
11203
11204         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
11205         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
11206                         << 32;
11207
11208         return rx_tstamp;
11209 }
11210
11211 static uint64_t
11212 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
11213 {
11214         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11215         uint64_t tx_tstamp;
11216
11217         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
11218         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
11219                         << 32;
11220
11221         return tx_tstamp;
11222 }
11223
11224 static void
11225 i40e_start_timecounters(struct rte_eth_dev *dev)
11226 {
11227         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11228         struct i40e_adapter *adapter = dev->data->dev_private;
11229         struct rte_eth_link link;
11230         uint32_t tsync_inc_l;
11231         uint32_t tsync_inc_h;
11232
11233         /* Get current link speed. */
11234         i40e_dev_link_update(dev, 1);
11235         rte_eth_linkstatus_get(dev, &link);
11236
11237         switch (link.link_speed) {
11238         case ETH_SPEED_NUM_40G:
11239         case ETH_SPEED_NUM_25G:
11240                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
11241                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
11242                 break;
11243         case ETH_SPEED_NUM_10G:
11244                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
11245                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
11246                 break;
11247         case ETH_SPEED_NUM_1G:
11248                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
11249                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
11250                 break;
11251         default:
11252                 tsync_inc_l = 0x0;
11253                 tsync_inc_h = 0x0;
11254         }
11255
11256         /* Set the timesync increment value. */
11257         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
11258         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
11259
11260         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
11261         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11262         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11263
11264         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11265         adapter->systime_tc.cc_shift = 0;
11266         adapter->systime_tc.nsec_mask = 0;
11267
11268         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11269         adapter->rx_tstamp_tc.cc_shift = 0;
11270         adapter->rx_tstamp_tc.nsec_mask = 0;
11271
11272         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11273         adapter->tx_tstamp_tc.cc_shift = 0;
11274         adapter->tx_tstamp_tc.nsec_mask = 0;
11275 }
11276
11277 static int
11278 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
11279 {
11280         struct i40e_adapter *adapter = dev->data->dev_private;
11281
11282         adapter->systime_tc.nsec += delta;
11283         adapter->rx_tstamp_tc.nsec += delta;
11284         adapter->tx_tstamp_tc.nsec += delta;
11285
11286         return 0;
11287 }
11288
11289 static int
11290 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
11291 {
11292         uint64_t ns;
11293         struct i40e_adapter *adapter = dev->data->dev_private;
11294
11295         ns = rte_timespec_to_ns(ts);
11296
11297         /* Set the timecounters to a new value. */
11298         adapter->systime_tc.nsec = ns;
11299         adapter->rx_tstamp_tc.nsec = ns;
11300         adapter->tx_tstamp_tc.nsec = ns;
11301
11302         return 0;
11303 }
11304
11305 static int
11306 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
11307 {
11308         uint64_t ns, systime_cycles;
11309         struct i40e_adapter *adapter = dev->data->dev_private;
11310
11311         systime_cycles = i40e_read_systime_cyclecounter(dev);
11312         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
11313         *ts = rte_ns_to_timespec(ns);
11314
11315         return 0;
11316 }
11317
11318 static int
11319 i40e_timesync_enable(struct rte_eth_dev *dev)
11320 {
11321         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11322         uint32_t tsync_ctl_l;
11323         uint32_t tsync_ctl_h;
11324
11325         /* Stop the timesync system time. */
11326         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11327         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11328         /* Reset the timesync system time value. */
11329         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
11330         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
11331
11332         i40e_start_timecounters(dev);
11333
11334         /* Clear timesync registers. */
11335         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11336         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
11337         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
11338         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
11339         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
11340         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
11341
11342         /* Enable timestamping of PTP packets. */
11343         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11344         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
11345
11346         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11347         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
11348         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
11349
11350         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11351         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11352
11353         return 0;
11354 }
11355
11356 static int
11357 i40e_timesync_disable(struct rte_eth_dev *dev)
11358 {
11359         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11360         uint32_t tsync_ctl_l;
11361         uint32_t tsync_ctl_h;
11362
11363         /* Disable timestamping of received and transmitted PTP packets. */
11364         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11365         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
11366
11367         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11368         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
11369
11370         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11371         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11372
11373         /* Reset the timesync increment value. */
11374         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11375         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11376
11377         return 0;
11378 }
11379
11380 static int
11381 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
11382                                 struct timespec *timestamp, uint32_t flags)
11383 {
11384         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11385         struct i40e_adapter *adapter = dev->data->dev_private;
11386         uint32_t sync_status;
11387         uint32_t index = flags & 0x03;
11388         uint64_t rx_tstamp_cycles;
11389         uint64_t ns;
11390
11391         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
11392         if ((sync_status & (1 << index)) == 0)
11393                 return -EINVAL;
11394
11395         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
11396         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
11397         *timestamp = rte_ns_to_timespec(ns);
11398
11399         return 0;
11400 }
11401
11402 static int
11403 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
11404                                 struct timespec *timestamp)
11405 {
11406         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11407         struct i40e_adapter *adapter = dev->data->dev_private;
11408         uint32_t sync_status;
11409         uint64_t tx_tstamp_cycles;
11410         uint64_t ns;
11411
11412         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11413         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
11414                 return -EINVAL;
11415
11416         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
11417         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
11418         *timestamp = rte_ns_to_timespec(ns);
11419
11420         return 0;
11421 }
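
/*
 * Illustrative usage sketch (not part of the driver): the timesync
 * callbacks above are consumed through the generic ethdev IEEE 1588
 * API. A minimal flow on a hypothetical port:
 *
 *     struct timespec ts;
 *     rte_eth_timesync_enable(port_id);
 *     if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, idx) == 0)
 *             printf("rx tstamp %ld s\n", (long)ts.tv_sec);
 *     rte_eth_timesync_adjust_time(port_id, 1000);    // slew by 1 us
 *     rte_eth_timesync_disable(port_id);
 *
 * idx selects one of the four RX timestamp registers (0-3). Note that
 * i40e_timesync_adjust_time() only offsets the software timecounters;
 * it does not touch the hardware clock registers.
 */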
11422
11423 /*
11424  * i40e_parse_dcb_configure - parse DCB configuration from user
11425  * @dev: the device being configured
11426  * @dcb_cfg: pointer to the parsed result
11427  * @tc_map: bit map of enabled traffic classes
11428  *
11429  * Returns 0 on success, negative value on failure
11430  */
11431 static int
11432 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
11433                          struct i40e_dcbx_config *dcb_cfg,
11434                          uint8_t *tc_map)
11435 {
11436         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
11437         uint8_t i, tc_bw, bw_lf;
11438
11439         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
11440
11441         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
11442         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
11443                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
11444                 return -EINVAL;
11445         }
11446
11447         /* assume each tc has the same bw */
11448         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
11449         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11450                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
11451         /* distribute the remainder to ensure the sum of tcbw equals 100 */
11452         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
11453         for (i = 0; i < bw_lf; i++)
11454                 dcb_cfg->etscfg.tcbwtable[i]++;
11455
11456         /* assume each tc has the same Transmission Selection Algorithm */
11457         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11458                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
11459
11460         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11461                 dcb_cfg->etscfg.prioritytable[i] =
11462                                 dcb_rx_conf->dcb_tc[i];
11463
11464         /* FW needs one App to configure HW */
11465         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
11466         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
11467         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
11468         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
11469
11470         if (dcb_rx_conf->nb_tcs == 0)
11471                 *tc_map = 1; /* tc0 only */
11472         else
11473                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
11474
11475         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
11476                 dcb_cfg->pfc.willing = 0;
11477                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
11478                 dcb_cfg->pfc.pfcenable = *tc_map;
11479         }
11480         return 0;
11481 }
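
/*
 * Worked example of the bandwidth split above (illustrative): with
 * nb_tcs = 3, tc_bw = 100 / 3 = 33 and bw_lf = 100 % 3 = 1, so the
 * first TC receives the remainder and tcbwtable becomes {34, 33, 33},
 * which sums to 100 as required. With nb_tcs = 4 the split is an even
 * {25, 25, 25, 25}.
 */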
11482
11483
11484 static enum i40e_status_code
11485 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
11486                               struct i40e_aqc_vsi_properties_data *info,
11487                               uint8_t enabled_tcmap)
11488 {
11489         enum i40e_status_code ret;
11490         int i, total_tc = 0;
11491         uint16_t qpnum_per_tc, bsf, qp_idx;
11492         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
11493         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
11494         uint16_t used_queues;
11495
11496         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
11497         if (ret != I40E_SUCCESS)
11498                 return ret;
11499
11500         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11501                 if (enabled_tcmap & (1 << i))
11502                         total_tc++;
11503         }
11504         if (total_tc == 0)
11505                 total_tc = 1;
11506         vsi->enabled_tc = enabled_tcmap;
11507
11508         /* Different VSI types have different numbers of queues assigned */
11509         if (vsi->type == I40E_VSI_MAIN)
11510                 used_queues = dev_data->nb_rx_queues -
11511                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11512         else if (vsi->type == I40E_VSI_VMDQ2)
11513                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11514         else {
11515                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
11516                 return I40E_ERR_NO_AVAILABLE_VSI;
11517         }
11518
11519         /* Number of queues per enabled TC */
11520         qpnum_per_tc = used_queues / total_tc;
11521         if (qpnum_per_tc == 0) {
11522                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
11523                 return I40E_ERR_INVALID_QP_ID;
11524         }
11525         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
11526                                 I40E_MAX_Q_PER_TC);
11527         bsf = rte_bsf32(qpnum_per_tc);
11528
11529         /**
11530          * Configure TC and queue mapping parameters. For each enabled TC,
11531          * allocate qpnum_per_tc queues to it; for each disabled TC, the
11532          * default queue will serve it.
11533          */
11534         qp_idx = 0;
11535         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11536                 if (vsi->enabled_tc & (1 << i)) {
11537                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
11538                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
11539                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
11540                         qp_idx += qpnum_per_tc;
11541                 } else
11542                         info->tc_mapping[i] = 0;
11543         }
11544
11545         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
11546         if (vsi->type == I40E_VSI_SRIOV) {
11547                 info->mapping_flags |=
11548                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
11549                 for (i = 0; i < vsi->nb_qps; i++)
11550                         info->queue_mapping[i] =
11551                                 rte_cpu_to_le_16(vsi->base_queue + i);
11552         } else {
11553                 info->mapping_flags |=
11554                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
11555                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
11556         }
11557         info->valid_sections |=
11558                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
11559
11560         return I40E_SUCCESS;
11561 }
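
/*
 * Worked example of the mapping above (illustrative): with
 * used_queues = 16 and total_tc = 4, qpnum_per_tc = 4 and
 * bsf = rte_bsf32(4) = 2, so each enabled TC is assigned
 * 2^bsf = 4 queues at offsets 0, 4, 8 and 12 via tc_mapping[].
 * i40e_align_floor() guarantees qpnum_per_tc is a power of two,
 * which the bsf encoding requires.
 */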
11562
11563 /*
11564  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
11565  * @veb: VEB to be configured
11566  * @tc_map: enabled TC bitmap
11567  *
11568  * Returns 0 on success, negative value on failure
11569  */
11570 static enum i40e_status_code
11571 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
11572 {
11573         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
11574         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
11575         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
11576         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
11577         enum i40e_status_code ret = I40E_SUCCESS;
11578         int i;
11579         uint32_t bw_max;
11580
11581         /* Check if enabled_tc is same as existing or new TCs */
11582         if (veb->enabled_tc == tc_map)
11583                 return ret;
11584
11585         /* configure tc bandwidth */
11586         memset(&veb_bw, 0, sizeof(veb_bw));
11587         veb_bw.tc_valid_bits = tc_map;
11588         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11589         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11590                 if (tc_map & BIT_ULL(i))
11591                         veb_bw.tc_bw_share_credits[i] = 1;
11592         }
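        /*
         * One credit per enabled TC gives equal ETS weights; e.g. with two
         * enabled TCs, each ends up with 50% of the bandwidth share.
         */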
11593         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
11594                                                    &veb_bw, NULL);
11595         if (ret) {
11596                 PMD_INIT_LOG(ERR,
11597                         "AQ command Config switch_comp BW allocation per TC failed = %d",
11598                         hw->aq.asq_last_status);
11599                 return ret;
11600         }
11601
11602         memset(&ets_query, 0, sizeof(ets_query));
11603         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
11604                                                    &ets_query, NULL);
11605         if (ret != I40E_SUCCESS) {
11606                 PMD_DRV_LOG(ERR,
11607                         "Failed to get switch_comp ETS configuration %u",
11608                         hw->aq.asq_last_status);
11609                 return ret;
11610         }
11611         memset(&bw_query, 0, sizeof(bw_query));
11612         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
11613                                                   &bw_query, NULL);
11614         if (ret != I40E_SUCCESS) {
11615                 PMD_DRV_LOG(ERR,
11616                         "Failed to get switch_comp bandwidth configuration %u",
11617                         hw->aq.asq_last_status);
11618                 return ret;
11619         }
11620
11621         /* store and print out BW info */
11622         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
11623         veb->bw_info.bw_max = ets_query.tc_bw_max;
11624         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
11625         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
11626         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
11627                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
11628                      I40E_16_BIT_WIDTH);
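        /*
         * tc_bw_max arrives as two little-endian 16-bit words combined into
         * one 32-bit value holding a 4-bit field per TC (top bit reserved);
         * e.g. TC2's max credits live in bits 8-10 of bw_max.
         */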
11629         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11630                 veb->bw_info.bw_ets_share_credits[i] =
11631                                 bw_query.tc_bw_share_credits[i];
11632                 veb->bw_info.bw_ets_credits[i] =
11633                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11634                 /* 4 bits per TC, 4th bit is reserved */
11635                 veb->bw_info.bw_ets_max[i] =
11636                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11637                                   RTE_LEN2MASK(3, uint8_t));
11638                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11639                             veb->bw_info.bw_ets_share_credits[i]);
11640                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11641                             veb->bw_info.bw_ets_credits[i]);
11642                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11643                             veb->bw_info.bw_ets_max[i]);
11644         }
11645
11646         veb->enabled_tc = tc_map;
11647
11648         return ret;
11649 }
11650
11652 /*
11653  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11654  * @vsi: VSI to be configured
11655  * @tc_map: enabled TC bitmap
11656  *
11657  * Returns 0 on success, negative value on failure
11658  */
11659 static enum i40e_status_code
11660 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11661 {
11662         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11663         struct i40e_vsi_context ctxt;
11664         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11665         enum i40e_status_code ret = I40E_SUCCESS;
11666         int i;
11667
11668         /* Nothing to do if the requested TC map matches the current one */
11669         if (vsi->enabled_tc == tc_map)
11670                 return ret;
11671
11672         /* configure tc bandwidth */
11673         memset(&bw_data, 0, sizeof(bw_data));
11674         bw_data.tc_valid_bits = tc_map;
11675         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11676         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11677                 if (tc_map & BIT_ULL(i))
11678                         bw_data.tc_bw_credits[i] = 1;
11679         }
11680         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11681         if (ret) {
11682                 PMD_INIT_LOG(ERR,
11683                         "AQ command Config VSI BW allocation per TC failed = %d",
11684                         hw->aq.asq_last_status);
11685                 goto out;
11686         }
11687         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11688                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11689
11690         /* Update Queue Pairs Mapping for currently enabled UPs */
11691         ctxt.seid = vsi->seid;
11692         ctxt.pf_num = hw->pf_id;
11693         ctxt.vf_num = 0;
11694         ctxt.uplink_seid = vsi->uplink_seid;
11695         ctxt.info = vsi->info;
11696         i40e_get_cap(hw);
11697         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11698         if (ret)
11699                 goto out;
11700
11701         /* Update the VSI after updating the VSI queue-mapping information */
11702         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11703         if (ret) {
11704                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11705                         hw->aq.asq_last_status);
11706                 goto out;
11707         }
11708         /* update the local VSI info with updated queue map */
11709         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11710                                         sizeof(vsi->info.tc_mapping));
11711         rte_memcpy(&vsi->info.queue_mapping,
11712                         &ctxt.info.queue_mapping,
11713                 sizeof(vsi->info.queue_mapping));
11714         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11715         vsi->info.valid_sections = 0;
11716
11717         /* query and update current VSI BW information */
11718         ret = i40e_vsi_get_bw_config(vsi);
11719         if (ret) {
11720                 PMD_INIT_LOG(ERR,
11721                          "Failed updating vsi bw info, err %s aq_err %s",
11722                          i40e_stat_str(hw, ret),
11723                          i40e_aq_str(hw, hw->aq.asq_last_status));
11724                 goto out;
11725         }
11726
11727         vsi->enabled_tc = tc_map;
11728
11729 out:
11730         return ret;
11731 }
11732
11733 /*
11734  * i40e_dcb_hw_configure - program the dcb setting to hw
11735  * @pf: pf the configuration is taken on
11736  * @new_cfg: new configuration
11737  * @tc_map: enabled TC bitmap
11738  *
11739  * Returns 0 on success, negative value on failure
11740  */
11741 static enum i40e_status_code
11742 i40e_dcb_hw_configure(struct i40e_pf *pf,
11743                       struct i40e_dcbx_config *new_cfg,
11744                       uint8_t tc_map)
11745 {
11746         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11747         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11748         struct i40e_vsi *main_vsi = pf->main_vsi;
11749         struct i40e_vsi_list *vsi_list;
11750         enum i40e_status_code ret;
11751         int i;
11752         uint32_t val;
11753
11754         /* Use the FW API only if FW >= v4.4 */
11755         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11756               (hw->aq.fw_maj_ver >= 5))) {
11757                 PMD_INIT_LOG(ERR,
11758                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
11759                 return I40E_ERR_FIRMWARE_API_VERSION;
11760         }
11761
11762         /* Check whether reconfiguration is needed */
11763         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11764                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
11765                 return I40E_SUCCESS;
11766         }
11767
11768         /* Copy the new config to the current config */
11769         *old_cfg = *new_cfg;
11770         old_cfg->etsrec = old_cfg->etscfg;
11771         ret = i40e_set_dcb_config(hw);
11772         if (ret) {
11773                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11774                          i40e_stat_str(hw, ret),
11775                          i40e_aq_str(hw, hw->aq.asq_last_status));
11776                 return ret;
11777         }
11778         /* Set the receive arbiter to RR mode and the ETS scheme by default */
11779         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11780                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11781                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11782                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11783                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11784                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11785                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11786                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11787                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11788                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11789                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11790                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11791                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11792         }
11793         /* get local mib to check whether it is configured correctly */
11794         /* IEEE mode */
11795         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11796         /* Get Local DCB Config */
11797         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11798                                      &hw->local_dcbx_config);
11799
11800         /* If a VEB has been created, its TC setting needs to be updated first */
11801         if (main_vsi->veb) {
11802                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11803                 if (ret)
11804                         PMD_INIT_LOG(WARNING,
11805                                  "Failed configuring TC for VEB seid=%d",
11806                                  main_vsi->veb->seid);
11807         }
11808         /* Update each VSI */
11809         i40e_vsi_config_tc(main_vsi, tc_map);
11810         if (main_vsi->veb) {
11811                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11812                         /* Besides the main VSI and VMDQ VSIs, only enable
11813                          * the default TC for other VSIs
11814                          */
11815                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11816                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11817                                                          tc_map);
11818                         else
11819                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11820                                                          I40E_DEFAULT_TCMAP);
11821                         if (ret)
11822                                 PMD_INIT_LOG(WARNING,
11823                                         "Failed configuring TC for VSI seid=%d",
11824                                         vsi_list->vsi->seid);
11825                         /* continue */
11826                 }
11827         }
11828         return I40E_SUCCESS;
11829 }
11830
11831 /*
11832  * i40e_dcb_init_configure - initial dcb config
11833  * @dev: device being configured
11834  * @sw_dcb: indicate whether DCB is configured in software or offloaded to hardware
11835  *
11836  * Returns 0 on success, negative value on failure
11837  */
11838 int
11839 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11840 {
11841         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11842         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11843         int i, ret = 0;
11844
11845         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11846                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11847                 return -ENOTSUP;
11848         }
11849
11850         /* DCB initialization:
11851          * Update DCB configuration from the Firmware and configure
11852          * LLDP MIB change event.
11853          */
11854         if (sw_dcb == TRUE) {
11855                 /* DPDK needs the LLDP agent stopped, but stopping it
11856                  * makes DCB init fail: i40e_init_dcb() requires LLDP
11857                  * to be enabled in order to initialize DCB successfully.
11858                  * So start LLDP before DCB init and stop it again after
11859                  * initialization.
11860                  */
11861                 ret = i40e_aq_start_lldp(hw, true, NULL);
11862                 if (ret != I40E_SUCCESS)
11863                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11864
11865                 ret = i40e_init_dcb(hw, true);
11866                 /* If the LLDP agent is stopped, i40e_init_dcb() is
11867                  * expected to fail with adminq status I40E_AQ_RC_EPERM;
11868                  * otherwise it should return success.
11869                  */
11870                 if (ret == I40E_SUCCESS ||
11871                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
11872                         memset(&hw->local_dcbx_config, 0,
11873                                 sizeof(struct i40e_dcbx_config));
11874                         /* set dcb default configuration */
11875                         hw->local_dcbx_config.etscfg.willing = 0;
11876                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11877                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11878                         hw->local_dcbx_config.etscfg.tsatable[0] =
11879                                                 I40E_IEEE_TSA_ETS;
11880                         /* all UPs mapping to TC0 */
11881                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11882                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11883                         hw->local_dcbx_config.etsrec =
11884                                 hw->local_dcbx_config.etscfg;
11885                         hw->local_dcbx_config.pfc.willing = 0;
11886                         hw->local_dcbx_config.pfc.pfccap =
11887                                                 I40E_MAX_TRAFFIC_CLASS;
11888                         /* FW needs one App to configure HW */
11889                         hw->local_dcbx_config.numapps = 1;
11890                         hw->local_dcbx_config.app[0].selector =
11891                                                 I40E_APP_SEL_ETHTYPE;
11892                         hw->local_dcbx_config.app[0].priority = 3;
11893                         hw->local_dcbx_config.app[0].protocolid =
11894                                                 I40E_APP_PROTOID_FCOE;
11895                         ret = i40e_set_dcb_config(hw);
11896                         if (ret) {
11897                                 PMD_INIT_LOG(ERR,
11898                                         "default DCB config failed, err = %d, aq_err = %d.",
11899                                         ret, hw->aq.asq_last_status);
11900                                 return -ENOSYS;
11901                         }
11902                 } else {
11903                         PMD_INIT_LOG(ERR,
11904                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
11905                                 ret, hw->aq.asq_last_status);
11906                         return -ENOTSUP;
11907                 }
11908
11909                 if (i40e_need_stop_lldp(dev)) {
11910                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
11911                         if (ret != I40E_SUCCESS)
11912                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11913                 }
11914         } else {
11915                 ret = i40e_aq_start_lldp(hw, true, NULL);
11916                 if (ret != I40E_SUCCESS)
11917                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11918
11919                 ret = i40e_init_dcb(hw, true);
11920                 if (!ret) {
11921                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11922                                 PMD_INIT_LOG(ERR,
11923                                         "HW doesn't support DCBX offload.");
11924                                 return -ENOTSUP;
11925                         }
11926                 } else {
11927                         PMD_INIT_LOG(ERR,
11928                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11929                                 ret, hw->aq.asq_last_status);
11930                         return -ENOTSUP;
11931                 }
11932         }
11933         return 0;
11934 }
11935
11936 /*
11937  * i40e_dcb_setup - setup dcb related config
11938  * @dev: device being configured
11939  *
11940  * Returns 0 on success, negative value on failure
11941  */
11942 static int
11943 i40e_dcb_setup(struct rte_eth_dev *dev)
11944 {
11945         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11946         struct i40e_dcbx_config dcb_cfg;
11947         uint8_t tc_map = 0;
11948         int ret = 0;
11949
11950         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11951                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11952                 return -ENOTSUP;
11953         }
11954
11955         if (pf->vf_num != 0)
11956                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
11957
11958         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11959         if (ret) {
11960                 PMD_INIT_LOG(ERR, "invalid dcb config");
11961                 return -EINVAL;
11962         }
11963         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11964         if (ret) {
11965                 PMD_INIT_LOG(ERR, "DCB hardware configuration failed");
11966                 return -ENOSYS;
11967         }
11968
11969         return 0;
11970 }
11971
11972 static int
11973 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11974                       struct rte_eth_dcb_info *dcb_info)
11975 {
11976         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11977         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11978         struct i40e_vsi *vsi = pf->main_vsi;
11979         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11980         uint16_t bsf, tc_mapping;
11981         int i, j = 0;
11982
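        /* enabled_tc is a contiguous low-order bitmap, so enabled_tc + 1 is a
         * power of two and rte_bsf32() of it gives the TC count, e.g.
         * 0x7 + 1 = 0x8 -> 3 TCs.
         */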
11983         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11984                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11985         else
11986                 dcb_info->nb_tcs = 1;
11987         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11988                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11989         for (i = 0; i < dcb_info->nb_tcs; i++)
11990                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11991
11992         /* get queue mapping if vmdq is disabled */
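        /* The decode below mirrors the encoding in
         * i40e_vsi_update_queue_mapping(): the base queue comes from the
         * QUE_OFFSET field and the queue count is 1 << QUE_NUMBER (bsf).
         */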
11993         if (!pf->nb_cfg_vmdq_vsi) {
11994                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11995                         if (!(vsi->enabled_tc & (1 << i)))
11996                                 continue;
11997                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11998                         dcb_info->tc_queue.tc_rxq[j][i].base =
11999                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12000                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12001                         dcb_info->tc_queue.tc_txq[j][i].base =
12002                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12003                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12004                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12005                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12006                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12007                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12008                 }
12009                 return 0;
12010         }
12011
12012         /* get queue mapping if vmdq is enabled */
12013         do {
12014                 vsi = pf->vmdq[j].vsi;
12015                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12016                         if (!(vsi->enabled_tc & (1 << i)))
12017                                 continue;
12018                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
12019                         dcb_info->tc_queue.tc_rxq[j][i].base =
12020                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12021                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12022                         dcb_info->tc_queue.tc_txq[j][i].base =
12023                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12024                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12025                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12026                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12027                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12028                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12029                 }
12030                 j++;
12031         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
12032         return 0;
12033 }
12034
12035 static int
12036 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
12037 {
12038         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12039         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12040         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12041         uint16_t msix_intr;
12042
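        /* Vector I40E_MISC_VEC_ID is driven through the PF-level DYN_CTL0
         * register; queue vectors start at I40E_RX_VEC_START and use their
         * own DYN_CTLN[vector - I40E_RX_VEC_START] register instead.
         */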
12043         msix_intr = intr_handle->intr_vec[queue_id];
12044         if (msix_intr == I40E_MISC_VEC_ID)
12045                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12046                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
12047                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
12048                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12049         else
12050                 I40E_WRITE_REG(hw,
12051                                I40E_PFINT_DYN_CTLN(msix_intr -
12052                                                    I40E_RX_VEC_START),
12053                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
12054                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
12055                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12056
12057         I40E_WRITE_FLUSH(hw);
12058         rte_intr_ack(&pci_dev->intr_handle);
12059
12060         return 0;
12061 }
12062
12063 static int
12064 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
12065 {
12066         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12067         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12068         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12069         uint16_t msix_intr;
12070
12071         msix_intr = intr_handle->intr_vec[queue_id];
12072         if (msix_intr == I40E_MISC_VEC_ID)
12073                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12074                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12075         else
12076                 I40E_WRITE_REG(hw,
12077                                I40E_PFINT_DYN_CTLN(msix_intr -
12078                                                    I40E_RX_VEC_START),
12079                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12080         I40E_WRITE_FLUSH(hw);
12081
12082         return 0;
12083 }
12084
12085 /**
12086  * This function is used to check if the register is valid.
12087  * Below are the register ranges that are valid for X722 only:
12088  * 0x2b800--0x2bb00
12089  * 0x38700--0x38a00
12090  * 0x3d800--0x3db00
12091  * 0x208e00--0x209000
12092  * 0x20be00--0x20c000
12093  * 0x263c00--0x264000
12094  * 0x265c00--0x266000
12095  */
12096 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
12097 {
12098         if ((type != I40E_MAC_X722) &&
12099             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
12100              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
12101              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
12102              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
12103              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
12104              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
12105              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
12106                 return 0;
12107         else
12108                 return 1;
12109 }
12110
12111 static int i40e_get_regs(struct rte_eth_dev *dev,
12112                          struct rte_dev_reg_info *regs)
12113 {
12114         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12115         uint32_t *ptr_data = regs->data;
12116         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
12117         const struct i40e_reg_info *reg_info;
12118
12119         if (ptr_data == NULL) {
12120                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
12121                 regs->width = sizeof(uint32_t);
12122                 return 0;
12123         }
12124
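        /* Register values are stored at their word index: byte offset
         * reg_offset lands in ptr_data[reg_offset >> 2] of the u32 buffer.
         */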
12125         /* The first few registers have to be read using AQ operations */
12126         reg_idx = 0;
12127         while (i40e_regs_adminq[reg_idx].name) {
12128                 reg_info = &i40e_regs_adminq[reg_idx++];
12129                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12130                         for (arr_idx2 = 0;
12131                                         arr_idx2 <= reg_info->count2;
12132                                         arr_idx2++) {
12133                                 reg_offset = arr_idx * reg_info->stride1 +
12134                                         arr_idx2 * reg_info->stride2;
12135                                 reg_offset += reg_info->base_addr;
12136                                 ptr_data[reg_offset >> 2] =
12137                                         i40e_read_rx_ctl(hw, reg_offset);
12138                         }
12139         }
12140
12141         /* The remaining registers can be read using primitives */
12142         reg_idx = 0;
12143         while (i40e_regs_others[reg_idx].name) {
12144                 reg_info = &i40e_regs_others[reg_idx++];
12145                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12146                         for (arr_idx2 = 0;
12147                                         arr_idx2 <= reg_info->count2;
12148                                         arr_idx2++) {
12149                                 reg_offset = arr_idx * reg_info->stride1 +
12150                                         arr_idx2 * reg_info->stride2;
12151                                 reg_offset += reg_info->base_addr;
12152                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
12153                                         ptr_data[reg_offset >> 2] = 0;
12154                                 else
12155                                         ptr_data[reg_offset >> 2] =
12156                                                 I40E_READ_REG(hw, reg_offset);
12157                         }
12158         }
12159
12160         return 0;
12161 }
12162
12163 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
12164 {
12165         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12166
12167         /* Convert word count to byte count */
12168         return hw->nvm.sr_size << 1;
12169 }
12170
12171 static int i40e_get_eeprom(struct rte_eth_dev *dev,
12172                            struct rte_dev_eeprom_info *eeprom)
12173 {
12174         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12175         uint16_t *data = eeprom->data;
12176         uint16_t offset, length, cnt_words;
12177         int ret_code;
12178
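        /* eeprom->offset and eeprom->length are byte counts, while the NVM
         * API works in 16-bit words, hence the >> 1 conversions below.
         */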
12179         offset = eeprom->offset >> 1;
12180         length = eeprom->length >> 1;
12181         cnt_words = length;
12182
12183         if (offset > hw->nvm.sr_size ||
12184                 offset + length > hw->nvm.sr_size) {
12185                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
12186                 return -EINVAL;
12187         }
12188
12189         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
12190
12191         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
12192         if (ret_code != I40E_SUCCESS || cnt_words != length) {
12193                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
12194                 return -EIO;
12195         }
12196
12197         return 0;
12198 }
12199
12200 static int i40e_get_module_info(struct rte_eth_dev *dev,
12201                                 struct rte_eth_dev_module_info *modinfo)
12202 {
12203         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12204         uint32_t sff8472_comp = 0;
12205         uint32_t sff8472_swap = 0;
12206         uint32_t sff8636_rev = 0;
12207         i40e_status status;
12208         uint32_t type = 0;
12209
12210         /* Check if firmware supports reading module EEPROM. */
12211         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
12212                 PMD_DRV_LOG(ERR,
12213                             "Module EEPROM memory read not supported. "
12214                             "Please update the NVM image.");
12215                 return -EINVAL;
12216         }
12217
12218         status = i40e_update_link_info(hw);
12219         if (status)
12220                 return -EIO;
12221
12222         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
12223                 PMD_DRV_LOG(ERR,
12224                             "Cannot read module EEPROM memory. "
12225                             "No module connected.");
12226                 return -EINVAL;
12227         }
12228
12229         type = hw->phy.link_info.module_type[0];
12230
12231         switch (type) {
12232         case I40E_MODULE_TYPE_SFP:
12233                 status = i40e_aq_get_phy_register(hw,
12234                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12235                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12236                                 I40E_MODULE_SFF_8472_COMP,
12237                                 &sff8472_comp, NULL);
12238                 if (status)
12239                         return -EIO;
12240
12241                 status = i40e_aq_get_phy_register(hw,
12242                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12243                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12244                                 I40E_MODULE_SFF_8472_SWAP,
12245                                 &sff8472_swap, NULL);
12246                 if (status)
12247                         return -EIO;
12248
12249                 /* Check if the module requires address swap to access
12250                  * the other EEPROM memory page.
12251                  */
12252                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
12253                         PMD_DRV_LOG(WARNING,
12254                                     "Module address swap to access "
12255                                     "page 0xA2 is not supported.");
12256                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12257                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12258                 } else if (sff8472_comp == 0x00) {
12259                         /* Module is not SFF-8472 compliant */
12260                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12261                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12262                 } else {
12263                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
12264                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
12265                 }
12266                 break;
12267         case I40E_MODULE_TYPE_QSFP_PLUS:
12268                 /* Read from memory page 0. */
12269                 status = i40e_aq_get_phy_register(hw,
12270                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12271                                 0, 1,
12272                                 I40E_MODULE_REVISION_ADDR,
12273                                 &sff8636_rev, NULL);
12274                 if (status)
12275                         return -EIO;
12276                 /* Determine revision compliance byte */
12277                 if (sff8636_rev > 0x02) {
12278                         /* Module is SFF-8636 compliant */
12279                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
12280                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12281                 } else {
12282                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
12283                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12284                 }
12285                 break;
12286         case I40E_MODULE_TYPE_QSFP28:
12287                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
12288                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12289                 break;
12290         default:
12291                 PMD_DRV_LOG(ERR, "Module type unrecognized");
12292                 return -EINVAL;
12293         }
12294         return 0;
12295 }
12296
12297 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
12298                                   struct rte_dev_eeprom_info *info)
12299 {
12300         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12301         bool is_sfp = false;
12302         i40e_status status;
12303         uint8_t *data;
12304         uint32_t value = 0;
12305         uint32_t i;
12306
12307         if (!info || !info->length || !info->data)
12308                 return -EINVAL;
12309
12310         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
12311                 is_sfp = true;
12312
12313         data = info->data;
12314         for (i = 0; i < info->length; i++) {
12315                 u32 offset = i + info->offset;
12316                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
12317
12318                 /* Check if we need to access the other memory page */
12319                 if (is_sfp) {
12320                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
12321                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
12322                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
12323                         }
12324                 } else {
12325                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
12326                                 /* Compute memory page number and offset. */
12327                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
12328                                 addr++;
12329                         }
12330                 }
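                /*
                 * Paging example (illustrative): a flat offset of 256 maps to
                 * I2C address 1, byte offset 128, since each upper QSFP page
                 * re-exposes bytes 128-255.
                 */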
12331                 status = i40e_aq_get_phy_register(hw,
12332                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12333                                 addr, 1, offset, &value, NULL);
12334                 if (status)
12335                         return -EIO;
12336                 data[i] = (uint8_t)value;
12337         }
12338         return 0;
12339 }
12340
12341 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
12342                                      struct rte_ether_addr *mac_addr)
12343 {
12344         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12345         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12346         struct i40e_vsi *vsi = pf->main_vsi;
12347         struct i40e_mac_filter_info mac_filter;
12348         struct i40e_mac_filter *f;
12349         int ret;
12350
12351         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
12352                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
12353                 return -EINVAL;
12354         }
12355
12356         TAILQ_FOREACH(f, &vsi->mac_list, next) {
12357                 if (rte_is_same_ether_addr(&pf->dev_addr,
12358                                                 &f->mac_info.mac_addr))
12359                         break;
12360         }
12361
12362         if (f == NULL) {
12363                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
12364                 return -EIO;
12365         }
12366
12367         mac_filter = f->mac_info;
12368         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
12369         if (ret != I40E_SUCCESS) {
12370                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
12371                 return -EIO;
12372         }
12373         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
12374         ret = i40e_vsi_add_mac(vsi, &mac_filter);
12375         if (ret != I40E_SUCCESS) {
12376                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
12377                 return -EIO;
12378         }
12379         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
12380
12381         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
12382                                         mac_addr->addr_bytes, NULL);
12383         if (ret != I40E_SUCCESS) {
12384                 PMD_DRV_LOG(ERR, "Failed to change mac");
12385                 return -EIO;
12386         }
12387
12388         return 0;
12389 }
12390
12391 static int
12392 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
12393 {
12394         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12395         struct rte_eth_dev_data *dev_data = pf->dev_data;
12396         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
12397         int ret = 0;
12398
12399         /* check if mtu is within the allowed range */
12400         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
12401                 return -EINVAL;
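        /* frame_size adds the L2 overhead (header, CRC, VLAN tags) on top of
         * the MTU; any result above RTE_ETHER_MAX_LEN (1518 bytes) turns the
         * jumbo frame offload on below.
         */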
12402
12403         /* MTU setting is forbidden while the port is started */
12404         if (dev_data->dev_started) {
12405                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
12406                             dev_data->port_id);
12407                 return -EBUSY;
12408         }
12409
12410         if (frame_size > RTE_ETHER_MAX_LEN)
12411                 dev_data->dev_conf.rxmode.offloads |=
12412                         DEV_RX_OFFLOAD_JUMBO_FRAME;
12413         else
12414                 dev_data->dev_conf.rxmode.offloads &=
12415                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
12416
12417         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
12418
12419         return ret;
12420 }
12421
12422 /* Restore ethertype filter */
12423 static void
12424 i40e_ethertype_filter_restore(struct i40e_pf *pf)
12425 {
12426         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12427         struct i40e_ethertype_filter_list
12428                 *ethertype_list = &pf->ethertype.ethertype_list;
12429         struct i40e_ethertype_filter *f;
12430         struct i40e_control_filter_stats stats;
12431         uint16_t flags;
12432
12433         TAILQ_FOREACH(f, ethertype_list, rules) {
12434                 flags = 0;
12435                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
12436                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
12437                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
12438                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
12439                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
12440
12441                 memset(&stats, 0, sizeof(stats));
12442                 i40e_aq_add_rem_control_packet_filter(hw,
12443                                             f->input.mac_addr.addr_bytes,
12444                                             f->input.ether_type,
12445                                             flags, pf->main_vsi->seid,
12446                                             f->queue, 1, &stats, NULL);
12447         }
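        /* Note: stats reflects only the counters returned by the last
         * AQ call in the loop above.
         */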
12448         PMD_DRV_LOG(INFO, "Ethertype filter:"
12449                     " mac_etype_used = %u, etype_used = %u,"
12450                     " mac_etype_free = %u, etype_free = %u",
12451                     stats.mac_etype_used, stats.etype_used,
12452                     stats.mac_etype_free, stats.etype_free);
12453 }
12454
12455 /* Restore tunnel filter */
12456 static void
12457 i40e_tunnel_filter_restore(struct i40e_pf *pf)
12458 {
12459         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12460         struct i40e_vsi *vsi;
12461         struct i40e_pf_vf *vf;
12462         struct i40e_tunnel_filter_list
12463                 *tunnel_list = &pf->tunnel.tunnel_list;
12464         struct i40e_tunnel_filter *f;
12465         struct i40e_aqc_cloud_filters_element_bb cld_filter;
12466         bool big_buffer;
12467
12468         TAILQ_FOREACH(f, tunnel_list, rules) {
                /* Reset per filter; a previous big-buffer match must not leak */
                big_buffer = false;
12469                 if (!f->is_to_vf)
12470                         vsi = pf->main_vsi;
12471                 else {
12472                         vf = &pf->vfs[f->vf_id];
12473                         vsi = vf->vsi;
12474                 }
12475                 memset(&cld_filter, 0, sizeof(cld_filter));
12476                 rte_ether_addr_copy((struct rte_ether_addr *)
12477                                 &f->input.outer_mac,
12478                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
12479                 rte_ether_addr_copy((struct rte_ether_addr *)
12480                                 &f->input.inner_mac,
12481                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
12482                 cld_filter.element.inner_vlan = f->input.inner_vlan;
12483                 cld_filter.element.flags = f->input.flags;
12484                 cld_filter.element.tenant_id = f->input.tenant_id;
12485                 cld_filter.element.queue_number = f->queue;
12486                 rte_memcpy(cld_filter.general_fields,
12487                            f->input.general_fields,
12488                            sizeof(f->input.general_fields));
12489
12490                 if (((f->input.flags &
12491                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
12492                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
12493                     ((f->input.flags &
12494                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
12495                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
12496                     ((f->input.flags &
12497                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
12498                      I40E_AQC_ADD_CLOUD_FILTER_0X10))
12499                         big_buffer = 1;
12500
12501                 if (big_buffer)
12502                         i40e_aq_add_cloud_filters_bb(hw,
12503                                         vsi->seid, &cld_filter, 1);
12504                 else
12505                         i40e_aq_add_cloud_filters(hw, vsi->seid,
12506                                                   &cld_filter.element, 1);
12507         }
12508 }
12509
12510 /* Restore RSS filter */
12511 static inline void
12512 i40e_rss_filter_restore(struct i40e_pf *pf)
12513 {
12514         struct i40e_rss_conf_list *list = &pf->rss_config_list;
12515         struct i40e_rss_filter *filter;
12516
12517         TAILQ_FOREACH(filter, list, next) {
12518                 i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
12519         }
12520 }
12521
12522 static void
12523 i40e_filter_restore(struct i40e_pf *pf)
12524 {
12525         i40e_ethertype_filter_restore(pf);
12526         i40e_tunnel_filter_restore(pf);
12527         i40e_fdir_filter_restore(pf);
12528         i40e_rss_filter_restore(pf);
12529 }
12530
12531 bool
12532 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
12533 {
12534         if (strcmp(dev->device->driver->name, drv->driver.name))
12535                 return false;
12536
12537         return true;
12538 }
12539
12540 bool
12541 is_i40e_supported(struct rte_eth_dev *dev)
12542 {
12543         return is_device_supported(dev, &rte_i40e_pmd);
12544 }
12545
12546 struct i40e_customized_pctype*
12547 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
12548 {
12549         int i;
12550
12551         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
12552                 if (pf->customized_pctype[i].index == index)
12553                         return &pf->customized_pctype[i];
12554         }
12555         return NULL;
12556 }
12557
12558 static int
12559 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
12560                               uint32_t pkg_size, uint32_t proto_num,
12561                               struct rte_pmd_i40e_proto_info *proto,
12562                               enum rte_pmd_i40e_package_op op)
12563 {
12564         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12565         uint32_t pctype_num;
12566         struct rte_pmd_i40e_ptype_info *pctype;
12567         uint32_t buff_size;
12568         struct i40e_customized_pctype *new_pctype = NULL;
12569         uint8_t proto_id;
12570         uint8_t pctype_value;
12571         char name[64];
12572         uint32_t i, j, n;
12573         int ret;
12574
12575         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12576             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12577                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12578                 return -1;
12579         }
12580
12581         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12582                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
12583                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
12584         if (ret) {
12585                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
12586                 return -1;
12587         }
12588         if (!pctype_num) {
12589                 PMD_DRV_LOG(INFO, "No new pctype added");
12590                 return -1;
12591         }
12592
12593         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
12594         pctype = rte_zmalloc("new_pctype", buff_size, 0);
12595         if (!pctype) {
12596                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12597                 return -1;
12598         }
12599         /* get information about new pctype list */
12600         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12601                                         (uint8_t *)pctype, buff_size,
12602                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
12603         if (ret) {
12604                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
12605                 rte_free(pctype);
12606                 return -1;
12607         }
12608
12609         /* Update customized pctype. */
12610         for (i = 0; i < pctype_num; i++) {
12611                 pctype_value = pctype[i].ptype_id;
12612                 memset(name, 0, sizeof(name));
12613                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12614                         proto_id = pctype[i].protocols[j];
12615                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12616                                 continue;
12617                         for (n = 0; n < proto_num; n++) {
12618                                 if (proto[n].proto_id != proto_id)
12619                                         continue;
12620                                 strlcat(name, proto[n].name, sizeof(name));
12621                                 strlcat(name, "_", sizeof(name));
12622                                 break;
12623                         }
12624                 }
12625                 /* Strip the trailing '_', guarding against an empty name */
                if (name[0] != '\0')
                        name[strlen(name) - 1] = '\0';
12626                 PMD_DRV_LOG(INFO, "name = %s", name);
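                /* e.g. protocols {GTPU, IPV4} assemble into "GTPU_IPV4_",
                 * trimmed to "GTPU_IPV4" before the matching below; the
                 * package's own protocol order decides the final name.
                 */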
12627                 if (!strcmp(name, "GTPC"))
12628                         new_pctype =
12629                                 i40e_find_customized_pctype(pf,
12630                                                       I40E_CUSTOMIZED_GTPC);
12631                 else if (!strcmp(name, "GTPU_IPV4"))
12632                         new_pctype =
12633                                 i40e_find_customized_pctype(pf,
12634                                                    I40E_CUSTOMIZED_GTPU_IPV4);
12635                 else if (!strcmp(name, "GTPU_IPV6"))
12636                         new_pctype =
12637                                 i40e_find_customized_pctype(pf,
12638                                                    I40E_CUSTOMIZED_GTPU_IPV6);
12639                 else if (!strcmp(name, "GTPU"))
12640                         new_pctype =
12641                                 i40e_find_customized_pctype(pf,
12642                                                       I40E_CUSTOMIZED_GTPU);
12643                 else if (!strcmp(name, "IPV4_L2TPV3"))
12644                         new_pctype =
12645                                 i40e_find_customized_pctype(pf,
12646                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
12647                 else if (!strcmp(name, "IPV6_L2TPV3"))
12648                         new_pctype =
12649                                 i40e_find_customized_pctype(pf,
12650                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
12651                 else if (!strcmp(name, "IPV4_ESP"))
12652                         new_pctype =
12653                                 i40e_find_customized_pctype(pf,
12654                                                 I40E_CUSTOMIZED_ESP_IPV4);
12655                 else if (!strcmp(name, "IPV6_ESP"))
12656                         new_pctype =
12657                                 i40e_find_customized_pctype(pf,
12658                                                 I40E_CUSTOMIZED_ESP_IPV6);
12659                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12660                         new_pctype =
12661                                 i40e_find_customized_pctype(pf,
12662                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12663                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12664                         new_pctype =
12665                                 i40e_find_customized_pctype(pf,
12666                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12667                 else if (!strcmp(name, "IPV4_AH"))
12668                         new_pctype =
12669                                 i40e_find_customized_pctype(pf,
12670                                                 I40E_CUSTOMIZED_AH_IPV4);
12671                 else if (!strcmp(name, "IPV6_AH"))
12672                         new_pctype =
12673                                 i40e_find_customized_pctype(pf,
12674                                                 I40E_CUSTOMIZED_AH_IPV6);
12675                 if (new_pctype) {
12676                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12677                                 new_pctype->pctype = pctype_value;
12678                                 new_pctype->valid = true;
12679                         } else {
12680                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12681                                 new_pctype->valid = false;
12682                         }
12683                 }
12684         }
12685
12686         rte_free(pctype);
12687         return 0;
12688 }
12689
12690 static int
12691 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12692                              uint32_t pkg_size, uint32_t proto_num,
12693                              struct rte_pmd_i40e_proto_info *proto,
12694                              enum rte_pmd_i40e_package_op op)
12695 {
12696         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12697         uint16_t port_id = dev->data->port_id;
12698         uint32_t ptype_num;
12699         struct rte_pmd_i40e_ptype_info *ptype;
12700         uint32_t buff_size;
12701         uint8_t proto_id;
12702         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12703         uint32_t i, j, n;
12704         bool in_tunnel;
12705         int ret;
12706
12707         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12708             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12709                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12710                 return -1;
12711         }
12712
12713         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12714                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12715                 return 0;
12716         }
12717
12718         /* get information about new ptype num */
12719         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12720                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12721                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12722         if (ret) {
12723                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12724                 return ret;
12725         }
12726         if (!ptype_num) {
12727                 PMD_DRV_LOG(INFO, "No new ptype added");
12728                 return -1;
12729         }
12730
12731         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12732         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12733         if (!ptype) {
12734                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12735                 return -1;
12736         }
12737
12738         /* get information about new ptype list */
12739         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12740                                         (uint8_t *)ptype, buff_size,
12741                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12742         if (ret) {
12743                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12744                 rte_free(ptype);
12745                 return ret;
12746         }
12747
12748         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12749         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12750         if (!ptype_mapping) {
12751                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12752                 rte_free(ptype);
12753                 return -1;
12754         }
12755
12756         /* Update ptype mapping table. */
12757         for (i = 0; i < ptype_num; i++) {
12758                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12759                 ptype_mapping[i].sw_ptype = 0;
12760                 in_tunnel = false;
12761                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12762                         proto_id = ptype[i].protocols[j];
12763                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12764                                 continue;
12765                         for (n = 0; n < proto_num; n++) {
12766                                 if (proto[n].proto_id != proto_id)
12767                                         continue;
12768                                 memset(name, 0, sizeof(name));
12769                                 strlcpy(name, proto[n].name, sizeof(name));
12770                                 PMD_DRV_LOG(INFO, "name = %s", name);
12771                                 if (!strncasecmp(name, "PPPOE", 5))
12772                                         ptype_mapping[i].sw_ptype |=
12773                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12774                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12775                                          !in_tunnel) {
12776                                         ptype_mapping[i].sw_ptype |=
12777                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12778                                         ptype_mapping[i].sw_ptype |=
12779                                                 RTE_PTYPE_L4_FRAG;
12780                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12781                                            in_tunnel) {
12782                                         ptype_mapping[i].sw_ptype |=
12783                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12784                                         ptype_mapping[i].sw_ptype |=
12785                                                 RTE_PTYPE_INNER_L4_FRAG;
12786                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12787                                         ptype_mapping[i].sw_ptype |=
12788                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12789                                         in_tunnel = true;
12790                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12791                                            !in_tunnel)
12792                                         ptype_mapping[i].sw_ptype |=
12793                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12794                                 else if (!strncasecmp(name, "IPV4", 4) &&
12795                                          in_tunnel)
12796                                         ptype_mapping[i].sw_ptype |=
12797                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12798                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12799                                          !in_tunnel) {
12800                                         ptype_mapping[i].sw_ptype |=
12801                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12802                                         ptype_mapping[i].sw_ptype |=
12803                                                 RTE_PTYPE_L4_FRAG;
12804                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12805                                            in_tunnel) {
12806                                         ptype_mapping[i].sw_ptype |=
12807                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12808                                         ptype_mapping[i].sw_ptype |=
12809                                                 RTE_PTYPE_INNER_L4_FRAG;
12810                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12811                                         ptype_mapping[i].sw_ptype |=
12812                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12813                                         in_tunnel = true;
12814                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12815                                            !in_tunnel)
12816                                         ptype_mapping[i].sw_ptype |=
12817                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12818                                 else if (!strncasecmp(name, "IPV6", 4) &&
12819                                          in_tunnel)
12820                                         ptype_mapping[i].sw_ptype |=
12821                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12822                                 else if (!strncasecmp(name, "UDP", 3) &&
12823                                          !in_tunnel)
12824                                         ptype_mapping[i].sw_ptype |=
12825                                                 RTE_PTYPE_L4_UDP;
12826                                 else if (!strncasecmp(name, "UDP", 3) &&
12827                                          in_tunnel)
12828                                         ptype_mapping[i].sw_ptype |=
12829                                                 RTE_PTYPE_INNER_L4_UDP;
12830                                 else if (!strncasecmp(name, "TCP", 3) &&
12831                                          !in_tunnel)
12832                                         ptype_mapping[i].sw_ptype |=
12833                                                 RTE_PTYPE_L4_TCP;
12834                                 else if (!strncasecmp(name, "TCP", 3) &&
12835                                          in_tunnel)
12836                                         ptype_mapping[i].sw_ptype |=
12837                                                 RTE_PTYPE_INNER_L4_TCP;
12838                                 else if (!strncasecmp(name, "SCTP", 4) &&
12839                                          !in_tunnel)
12840                                         ptype_mapping[i].sw_ptype |=
12841                                                 RTE_PTYPE_L4_SCTP;
12842                                 else if (!strncasecmp(name, "SCTP", 4) &&
12843                                          in_tunnel)
12844                                         ptype_mapping[i].sw_ptype |=
12845                                                 RTE_PTYPE_INNER_L4_SCTP;
12846                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12847                                           !strncasecmp(name, "ICMPV6", 6)) &&
12848                                          !in_tunnel)
12849                                         ptype_mapping[i].sw_ptype |=
12850                                                 RTE_PTYPE_L4_ICMP;
12851                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12852                                           !strncasecmp(name, "ICMPV6", 6)) &&
12853                                          in_tunnel)
12854                                         ptype_mapping[i].sw_ptype |=
12855                                                 RTE_PTYPE_INNER_L4_ICMP;
12856                                 else if (!strncasecmp(name, "GTPC", 4)) {
12857                                         ptype_mapping[i].sw_ptype |=
12858                                                 RTE_PTYPE_TUNNEL_GTPC;
12859                                         in_tunnel = true;
12860                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12861                                         ptype_mapping[i].sw_ptype |=
12862                                                 RTE_PTYPE_TUNNEL_GTPU;
12863                                         in_tunnel = true;
12864                                 } else if (!strncasecmp(name, "ESP", 3)) {
12865                                         ptype_mapping[i].sw_ptype |=
12866                                                 RTE_PTYPE_TUNNEL_ESP;
12867                                         in_tunnel = true;
12868                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12869                                         ptype_mapping[i].sw_ptype |=
12870                                                 RTE_PTYPE_TUNNEL_GRENAT;
12871                                         in_tunnel = true;
12872                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12873                                            !strncasecmp(name, "L2TPV2", 6) ||
12874                                            !strncasecmp(name, "L2TPV3", 6)) {
12875                                         ptype_mapping[i].sw_ptype |=
12876                                                 RTE_PTYPE_TUNNEL_L2TP;
12877                                         in_tunnel = true;
12878                                 }
12879
12880                                 break;
12881                         }
12882                 }
12883         }
12884
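        /* Apply the new entries. The final argument (exclusive = 0) updates
         * only the hardware ptypes referred to by the mapping items and
         * leaves the rest of the table untouched.
         */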
12885         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12886                                                 ptype_num, 0);
12887         if (ret)
12888                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
12889
12890         rte_free(ptype_mapping);
12891         rte_free(ptype);
12892         return ret;
12893 }
12894
12895 void
12896 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12897                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12898 {
12899         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12900         uint32_t proto_num;
12901         struct rte_pmd_i40e_proto_info *proto;
12902         uint32_t buff_size;
12903         uint32_t i;
12904         int ret;
12905
12906         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12907             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12908                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12909                 return;
12910         }
12911
12912         /* get information about protocol number */
12913         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12914                                        (uint8_t *)&proto_num, sizeof(proto_num),
12915                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12916         if (ret) {
12917                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12918                 return;
12919         }
12920         if (!proto_num) {
12921                 PMD_DRV_LOG(INFO, "No new protocol added");
12922                 return;
12923         }
12924
12925         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12926         proto = rte_zmalloc("new_proto", buff_size, 0);
12927         if (!proto) {
12928                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12929                 return;
12930         }
12931
12932         /* get information about protocol list */
12933         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12934                                         (uint8_t *)proto, buff_size,
12935                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12936         if (ret) {
12937                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12938                 rte_free(proto);
12939                 return;
12940         }
12941
12942         /* Check if GTP is supported. */
12943         for (i = 0; i < proto_num; i++) {
12944                 if (!strncmp(proto[i].name, "GTP", 3)) {
12945                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12946                                 pf->gtp_support = true;
12947                         else
12948                                 pf->gtp_support = false;
12949                         break;
12950                 }
12951         }
12952
12953         /* Check if ESP is supported. */
12954         for (i = 0; i < proto_num; i++) {
12955                 if (!strncmp(proto[i].name, "ESP", 3)) {
12956                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12957                                 pf->esp_support = true;
12958                         else
12959                                 pf->esp_support = false;
12960                         break;
12961                 }
12962         }
12963
12964         /* Update customized pctype info */
12965         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12966                                             proto_num, proto, op);
12967         if (ret)
12968                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12969
12970         /* Update customized ptype info */
12971         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12972                                            proto_num, proto, op);
12973         if (ret)
12974                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12975
12976         rte_free(proto);
12977 }
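
/* Usage sketch (illustrative, not part of the driver): applications do not
 * call the updater above directly; they are expected to reach it by writing
 * a DDP profile through the PMD-specific API, e.g.:
 *
 *	uint8_t *buff;		// profile file contents, loaded by the app
 *	uint32_t size;		// profile file size in bytes
 *
 *	ret = rte_pmd_i40e_process_ddp_package(port_id, buff, size,
 *					       RTE_PMD_I40E_PKG_OP_WR_ADD);
 *
 * On success the PMD refreshes the customized pctype/ptype mappings and the
 * GTP/ESP capability flags using the same package buffer.
 */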
12978
12979 /* Create a QinQ cloud filter
12980  *
12981  * The Fortville NIC has limited resources for tunnel filters,
12982  * so we can only reuse existing filters.
12983  *
12984  * In step 1 we define which Field Vector fields can be used for
12985  * filter types.
12986  * As we do not have the inner tag defined as a field,
12987  * we have to define it first by reusing one of the L1 entries.
12988  *
12989  * In step 2 we are replacing one of existing filter types with
12990  * a new one for QinQ.
12991  * As we are reusing L1 and replacing L2, some of the default filter
12992  * types will disappear, depending on which L1 and L2 entries we reuse.
12993  *
12994  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12995  *
12996  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12997  *              later when we define the cloud filter.
12998  *      a.      Valid_flags.replace_cloud = 0
12999  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
13000  *      c.      New_filter = 0x10
13001  *      d.      TR bit = 0xff (optional, not used here)
13002  *      e.      Buffer - 2 entries:
13003  *              i.      Byte 0 = 8 (outer vlan FV index).
13004  *                      Byte 1 = 0 (rsv)
13005  *                      Byte 2-3 = 0x0fff
13006  *              ii.     Byte 0 = 37 (inner vlan FV index).
13007  *                      Byte 1 = 0 (rsv)
13008  *                      Byte 2-3 = 0x0fff
13009  *
13010  * Step 2:
13011  * 2.   Create cloud filter using two L1 filters entries: stag and
13012  *              new filter(outer vlan+ inner vlan)
13013  *      a.      Valid_flags.replace_cloud = 1
13014  *      b.      Old_filter = 1 (instead of outer IP)
13015  *      c.      New_filter = 0x10
13016  *      d.      Buffer - 2 entries:
13017  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
13018  *                      Byte 1-3 = 0 (rsv)
13019  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
13020  *                      Byte 9-11 = 0 (rsv)
13021  */
13022 static int
13023 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
13024 {
13025         int ret = -ENOTSUP;
13026         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
13027         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
13028         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13029         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
13030
13031         if (pf->support_multi_driver) {
13032                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
13033                 return ret;
13034         }
13035
13036         /* Init */
13037         memset(&filter_replace, 0,
13038                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13039         memset(&filter_replace_buf, 0,
13040                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13041
13042         /* create L1 filter */
13043         filter_replace.old_filter_type =
13044                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
13045         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13046         filter_replace.tr_bit = 0;
13047
13048         /* Prepare the buffer, 2 entries */
13049         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
13050         filter_replace_buf.data[0] |=
13051                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13052         /* Field Vector 12b mask */
13053         filter_replace_buf.data[2] = 0xff;
13054         filter_replace_buf.data[3] = 0x0f;
13055         filter_replace_buf.data[4] =
13056                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
13057         filter_replace_buf.data[4] |=
13058                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13059         /* Field Vector 12b mask */
13060         filter_replace_buf.data[6] = 0xff;
13061         filter_replace_buf.data[7] = 0x0f;
13062         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13063                         &filter_replace_buf);
13064         if (ret != I40E_SUCCESS)
13065                 return ret;
13066
13067         if (filter_replace.old_filter_type !=
13068             filter_replace.new_filter_type)
13069                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
13070                             " original: 0x%x, new: 0x%x",
13071                             dev->device->name,
13072                             filter_replace.old_filter_type,
13073                             filter_replace.new_filter_type);
13074
13075         /* Apply the second L2 cloud filter */
13076         memset(&filter_replace, 0,
13077                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13078         memset(&filter_replace_buf, 0,
13079                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13080
13081         /* Create L2 filter; the L1 filter created above is its input */
13082         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
13083         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
13084         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13085
13086         /* Prepare the buffer, 2 entries */
13087         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
13088         filter_replace_buf.data[0] |=
13089                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13090         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13091         filter_replace_buf.data[4] |=
13092                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13093         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13094                         &filter_replace_buf);
13095         if (!ret && (filter_replace.old_filter_type !=
13096                      filter_replace.new_filter_type))
13097                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
13098                             " original: 0x%x, new: 0x%x",
13099                             dev->device->name,
13100                             filter_replace.old_filter_type,
13101                             filter_replace.new_filter_type);
13102
13103         return ret;
13104 }
13105
13106 int
13107 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
13108                    const struct rte_flow_action_rss *in)
13109 {
13110         if (in->key_len > RTE_DIM(out->key) ||
13111             in->queue_num > RTE_DIM(out->queue))
13112                 return -EINVAL;
13113         if (!in->key && in->key_len)
13114                 return -EINVAL;
13115         out->conf = (struct rte_flow_action_rss){
13116                 .func = in->func,
13117                 .level = in->level,
13118                 .types = in->types,
13119                 .key_len = in->key_len,
13120                 .queue_num = in->queue_num,
13121                 .queue = memcpy(out->queue, in->queue,
13122                                 sizeof(*in->queue) * in->queue_num),
13123         };
13124         if (in->key)
13125                 out->conf.key = memcpy(out->key, in->key, in->key_len);
13126         return 0;
13127 }
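
/* Usage sketch (illustrative): callers duplicate a flow's RSS action into
 * driver-owned storage so the application's buffers may be released, e.g.:
 *
 *	struct i40e_rte_flow_rss_conf out = { 0 };
 *
 *	if (i40e_rss_conf_init(&out, &rss_action_from_flow))
 *		return -EINVAL;	// key or queue array larger than storage
 *
 * 'rss_action_from_flow' stands in for the rte_flow_action_rss taken from
 * the flow rule being validated.
 */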
13128
13129 /* Write HENA register to enable hash */
13130 static int
13131 i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
13132 {
13133         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13134         uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
13135         uint64_t hena;
13136         int ret;
13137
13138         ret = i40e_set_rss_key(pf->main_vsi, key,
13139                                rss_conf->conf.key_len);
13140         if (ret)
13141                 return ret;
13142
13143         hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
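        /* HENA is a 64-bit bitmap of enabled hash PCTYPEs exposed as two
         * 32-bit registers, so write the low and high halves separately.
         */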
13144         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
13145         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
13146         I40E_WRITE_FLUSH(hw);
13147
13148         return 0;
13149 }
13150
13151 /* Configure hash input set */
13152 static int
13153 i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
13154 {
13155         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13156         struct rte_eth_input_set_conf conf;
13157         uint64_t mask0;
13158         int ret = 0;
13159         uint32_t j;
13160         int i;
13161         static const struct {
13162                 uint64_t type;
13163                 enum rte_eth_input_set_field field;
13164         } inset_match_table[] = {
13165                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
13166                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13167                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
13168                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13169                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
13170                         RTE_ETH_INPUT_SET_UNKNOWN},
13171                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
13172                         RTE_ETH_INPUT_SET_UNKNOWN},
13173
13174                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
13175                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13176                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
13177                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13178                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
13179                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13180                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
13181                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13182
13183                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
13184                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13185                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
13186                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13187                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
13188                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13189                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
13190                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13191
13192                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
13193                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13194                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
13195                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13196                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
13197                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13198                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
13199                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13200
13201                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
13202                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13203                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
13204                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13205                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
13206                         RTE_ETH_INPUT_SET_UNKNOWN},
13207                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
13208                         RTE_ETH_INPUT_SET_UNKNOWN},
13209
13210                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
13211                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13212                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
13213                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13214                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
13215                         RTE_ETH_INPUT_SET_UNKNOWN},
13216                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
13217                         RTE_ETH_INPUT_SET_UNKNOWN},
13218
13219                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
13220                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13221                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
13222                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13223                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
13224                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13225                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
13226                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13227
13228                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
13229                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13230                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
13231                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13232                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
13233                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13234                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
13235                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13236
13237                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
13238                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13239                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
13240                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13241                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
13242                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13243                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
13244                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13245
13246                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
13247                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13248                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
13249                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13250                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
13251                         RTE_ETH_INPUT_SET_UNKNOWN},
13252                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
13253                         RTE_ETH_INPUT_SET_UNKNOWN},
13254         };
13255
13256         mask0 = types & pf->adapter->flow_types_mask;
13257         conf.op = RTE_ETH_INPUT_SET_SELECT;
13258         conf.inset_size = 0;
13259         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
13260                 if (mask0 & (1ULL << i)) {
13261                         conf.flow_type = i;
13262                         break;
13263                 }
13264         }
13265
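        /* Collect every input-set field implied by the requested types.
         * Example (illustrative): ETH_RSS_NONFRAG_IPV4_TCP |
         * ETH_RSS_L3_SRC_ONLY selects only RTE_ETH_INPUT_SET_L3_SRC_IP4,
         * so such flows hash on the IPv4 source address alone; combinations
         * that map to RTE_ETH_INPUT_SET_UNKNOWN (e.g. L4 fields of a
         * fragmented IPv4 flow) are rejected below.
         */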
13266         for (j = 0; j < RTE_DIM(inset_match_table); j++) {
13267                 if ((types & inset_match_table[j].type) ==
13268                     inset_match_table[j].type) {
13269                         if (inset_match_table[j].field ==
13270                             RTE_ETH_INPUT_SET_UNKNOWN)
13271                                 return -EINVAL;
13272
13273                         conf.field[conf.inset_size] =
13274                                 inset_match_table[j].field;
13275                         conf.inset_size++;
13276                 }
13277         }
13278
13279         if (conf.inset_size) {
13280                 ret = i40e_hash_filter_inset_select(hw, &conf);
13281                 if (ret)
13282                         return ret;
13283         }
13284
13285         return ret;
13286 }
13287
13288 /* Look up any conflicting rule and mark it as invalid */
13289 static void
13290 i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
13291                 struct i40e_rte_flow_rss_conf *conf)
13292 {
13293         struct i40e_rss_filter *rss_item;
13294         uint64_t rss_inset;
13295
13296         /* Clear input set bits before comparing the pctype */
13297         rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
13298                 ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
13299
13300         /* Walk the list and invalidate any rule that conflicts */
13301         TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
13302                 if (!rss_item->rss_filter_info.valid)
13303                         continue;
13304
13305                 if (conf->conf.queue_num &&
13306                     rss_item->rss_filter_info.conf.queue_num)
13307                         rss_item->rss_filter_info.valid = false;
13308
13309                 if (conf->conf.types &&
13310                     (rss_item->rss_filter_info.conf.types &
13311                     rss_inset) ==
13312                     (conf->conf.types & rss_inset))
13313                         rss_item->rss_filter_info.valid = false;
13314
13315                 if (conf->conf.func ==
13316                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
13317                     rss_item->rss_filter_info.conf.func ==
13318                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13319                         rss_item->rss_filter_info.valid = false;
13320         }
13321 }
13322
13323 /* Configure RSS hash function */
13324 static int
13325 i40e_rss_config_hash_function(struct i40e_pf *pf,
13326                 struct i40e_rte_flow_rss_conf *conf)
13327 {
13328         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13329         uint32_t reg, i;
13330         uint64_t mask0;
13331         uint16_t j;
13332
13333         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13334                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13335                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
13336                         PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
13337                         I40E_WRITE_FLUSH(hw);
13338                         i40e_rss_mark_invalid_rule(pf, conf);
13339
13340                         return 0;
13341                 }
13342                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
13343
13344                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13345                 I40E_WRITE_FLUSH(hw);
13346                 i40e_rss_mark_invalid_rule(pf, conf);
13347         } else if (conf->conf.func ==
13348                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13349                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13350
13351                 i40e_set_symmetric_hash_enable_per_port(hw, 1);
13352                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13353                         if (mask0 & (1ULL << i))
13354                                 break;
13355                 }
13356
13357                 if (i == UINT64_BIT)
13358                         return -EINVAL;
13359
13360                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13361                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13362                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13363                                 i40e_write_global_rx_ctl(hw,
13364                                         I40E_GLQF_HSYM(j),
13365                                         I40E_GLQF_HSYM_SYMH_ENA_MASK);
13366                 }
13367         }
13368
13369         return 0;
13370 }
13371
13372 /* Enable RSS according to the configuration */
13373 static int
13374 i40e_rss_enable_hash(struct i40e_pf *pf,
13375                 struct i40e_rte_flow_rss_conf *conf)
13376 {
13377         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13378         struct i40e_rte_flow_rss_conf rss_conf;
13379
13380         if (!(conf->conf.types & pf->adapter->flow_types_mask))
13381                 return -ENOTSUP;
13382
13383         memset(&rss_conf, 0, sizeof(rss_conf));
13384         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13385
13386         /* Configure hash input set */
13387         if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
13388                 return -EINVAL;
13389
13390         if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
13391             (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
13392                 /* Random default keys */
13393                 static uint32_t rss_key_default[] = {0x6b793944,
13394                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
13395                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
13396                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
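                /* 13 words = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4 = 52 bytes,
                 * matching the minimum key length checked above.
                 */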
13397
13398                 rss_conf.conf.key = (uint8_t *)rss_key_default;
13399                 rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
13400                                 sizeof(uint32_t);
13401                 PMD_DRV_LOG(INFO,
13402                         "No valid RSS key config for i40e, using default");
13403         }
13404
13405         rss_conf.conf.types |= rss_info->conf.types;
13406         i40e_rss_hash_set(pf, &rss_conf);
13407
13408         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
13409                 i40e_rss_config_hash_function(pf, conf);
13410
13411         i40e_rss_mark_invalid_rule(pf, conf);
13412
13413         return 0;
13414 }
13415
13416 /* Configure RSS queue region */
13417 static int
13418 i40e_rss_config_queue_region(struct i40e_pf *pf,
13419                 struct i40e_rte_flow_rss_conf *conf)
13420 {
13421         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13422         uint32_t lut = 0;
13423         uint16_t j, num;
13424         uint32_t i;
13425
13426         /* If both VMDQ and RSS are enabled, not all of the PF queues are
13427          * configured, so the number actually configured must be calculated.
13428          */
13429         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13430                 num = i40e_pf_calc_configured_queues_num(pf);
13431         else
13432                 num = pf->dev_data->nb_rx_queues;
13433
13434         num = RTE_MIN(num, conf->conf.queue_num);
13435         PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
13436                         num);
13437
13438         if (num == 0) {
13439                 PMD_DRV_LOG(ERR,
13440                         "No PF queues are configured to enable RSS for port %u",
13441                         pf->dev_data->port_id);
13442                 return -ENOTSUP;
13443         }
13444
13445         /* Fill in redirection table */
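        /* Each 32-bit HLUT register packs four 8-bit LUT entries: shift the
         * next queue index in and flush the word on every fourth entry.
         * Queue indices are taken round-robin from the first 'num' queues.
         */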
13446         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13447                 if (j == num)
13448                         j = 0;
13449                 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
13450                         hw->func_caps.rss_table_entry_width) - 1));
13451                 if ((i & 3) == 3)
13452                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13453         }
13454
13455         i40e_rss_mark_invalid_rule(pf, conf);
13456
13457         return 0;
13458 }
13459
13460 /* Configure RSS hash function to default */
13461 static int
13462 i40e_rss_clear_hash_function(struct i40e_pf *pf,
13463                 struct i40e_rte_flow_rss_conf *conf)
13464 {
13465         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13466         uint32_t i, reg;
13467         uint64_t mask0;
13468         uint16_t j;
13469
13470         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13471                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13472                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
13473                         PMD_DRV_LOG(DEBUG,
13474                                 "Hash function already set to Toeplitz");
13475                         I40E_WRITE_FLUSH(hw);
13476
13477                         return 0;
13478                 }
13479                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
13480
13481                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13482                 I40E_WRITE_FLUSH(hw);
13483         } else if (conf->conf.func ==
13484                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13485                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13486
13487                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13488                         if (mask0 & (1ULL << i))
13489                                 break;
13490                 }
13491
13492                 if (i == UINT64_BIT)
13493                         return -EINVAL;
13494
13495                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13496                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13497                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13498                                 i40e_write_global_rx_ctl(hw,
13499                                         I40E_GLQF_HSYM(j),
13500                                         0);
13501                 }
13502         }
13503
13504         return 0;
13505 }
13506
13507 /* Disable RSS hash and configure default input set */
13508 static int
13509 i40e_rss_disable_hash(struct i40e_pf *pf,
13510                 struct i40e_rte_flow_rss_conf *conf)
13511 {
13512         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13513         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13514         struct i40e_rte_flow_rss_conf rss_conf;
13515         uint32_t i;
13516
13517         memset(&rss_conf, 0, sizeof(rss_conf));
13518         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13519
13520         /* Disable RSS hash */
13521         rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
13522         i40e_rss_hash_set(pf, &rss_conf);
13523
13524         for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
13525                 if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
13526                     !(conf->conf.types & (1ULL << i)))
13527                         continue;
13528
13529                 /* Configure default input set */
13530                 struct rte_eth_input_set_conf input_conf = {
13531                         .op = RTE_ETH_INPUT_SET_SELECT,
13532                         .flow_type = i,
13533                         .inset_size = 1,
13534                 };
13535                 input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
13536                 i40e_hash_filter_inset_select(hw, &input_conf);
13537         }
13538
13539         rss_info->conf.types = rss_conf.conf.types;
13540
13541         i40e_rss_clear_hash_function(pf, conf);
13542
13543         return 0;
13544 }
13545
13546 /* Configure RSS queue region to default */
13547 static int
13548 i40e_rss_clear_queue_region(struct i40e_pf *pf)
13549 {
13550         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13551         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13552         uint16_t queue[I40E_MAX_Q_PER_TC];
13553         uint32_t num_rxq, i;
13554         uint32_t lut = 0;
13555         uint16_t j, num;
13556
13557         num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
13558
13559         for (j = 0; j < num_rxq; j++)
13560                 queue[j] = j;
13561
13562         /* If both VMDQ and RSS are enabled, not all of the PF queues are
13563          * configured, so the number actually configured must be calculated.
13564          */
13565         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13566                 num = i40e_pf_calc_configured_queues_num(pf);
13567         else
13568                 num = pf->dev_data->nb_rx_queues;
13569
13570         num = RTE_MIN(num, num_rxq);
13571         PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
13572                         num);
13573
13574         if (num == 0) {
13575                 PMD_DRV_LOG(ERR,
13576                         "No PF queues are configured to enable RSS for port %u",
13577                         pf->dev_data->port_id);
13578                 return -ENOTSUP;
13579         }
13580
13581         /* Fill in redirection table */
13582         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13583                 if (j == num)
13584                         j = 0;
13585                 lut = (lut << 8) | (queue[j] & ((0x1 <<
13586                         hw->func_caps.rss_table_entry_width) - 1));
13587                 if ((i & 3) == 3)
13588                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13589         }
13590
13591         rss_info->conf.queue_num = 0;
13592         rss_info->conf.queue = NULL;
13593
13594         return 0;
13595 }
13596
13597 int
13598 i40e_config_rss_filter(struct i40e_pf *pf,
13599                 struct i40e_rte_flow_rss_conf *conf, bool add)
13600 {
13601         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13602         struct rte_flow_action_rss update_conf = rss_info->conf;
13603         int ret = 0;
13604
13605         if (add) {
13606                 if (conf->conf.queue_num) {
13607                         /* Configure RSS queue region */
13608                         ret = i40e_rss_config_queue_region(pf, conf);
13609                         if (ret)
13610                                 return ret;
13611
13612                         update_conf.queue_num = conf->conf.queue_num;
13613                         update_conf.queue = conf->conf.queue;
13614                 } else if (conf->conf.func ==
13615                            RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13616                         /* Configure hash function */
13617                         ret = i40e_rss_config_hash_function(pf, conf);
13618                         if (ret)
13619                                 return ret;
13620
13621                         update_conf.func = conf->conf.func;
13622                 } else {
13623                         /* Configure hash enable and input set */
13624                         ret = i40e_rss_enable_hash(pf, conf);
13625                         if (ret)
13626                                 return ret;
13627
13628                         update_conf.types |= conf->conf.types;
13629                         update_conf.key = conf->conf.key;
13630                         update_conf.key_len = conf->conf.key_len;
13631                 }
13632
13633                 /* Update RSS info in pf */
13634                 if (i40e_rss_conf_init(rss_info, &update_conf))
13635                         return -EINVAL;
13636         } else {
13637                 if (!conf->valid)
13638                         return 0;
13639
13640                 if (conf->conf.queue_num)
13641                         i40e_rss_clear_queue_region(pf);
13642                 else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13643                         i40e_rss_clear_hash_function(pf, conf);
13644                 else
13645                         i40e_rss_disable_hash(pf, conf);
13646         }
13647
13648         return 0;
13649 }
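
/* Usage sketch (illustrative): the flow layer is expected to drive this
 * helper when an RSS rule is created or destroyed:
 *
 *	i40e_config_rss_filter(pf, &rss_conf, true);	// program the rule
 *	...
 *	i40e_config_rss_filter(pf, &rss_conf, false);	// tear it down
 *
 * On add, exactly one path runs, selected in order: queue region when
 * queue_num is set, Simple XOR when that hash function is requested,
 * otherwise hash enable plus input set.
 */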
13650
13651 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
13652 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
13653 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
13654 RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
13655 #endif
13656 #ifdef RTE_LIBRTE_I40E_DEBUG_TX
13657 RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
13658 #endif
13659 #ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
13660 RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG);
13661 #endif
13662
13663 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
13664                               ETH_I40E_FLOATING_VEB_ARG "=1"
13665                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
13666                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
13667                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
13668                               ETH_I40E_USE_LATEST_VEC "=0|1"
                              ETH_I40E_VF_MSG_CFG "=<nums>");