ethdev: reset all when releasing a port
[dpdk.git] drivers/net/i40e/i40e_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_tailq.h>
28 #include <rte_hash_crc.h>
29 #include <rte_bitmap.h>
30
31 #include "i40e_logs.h"
32 #include "base/i40e_prototype.h"
33 #include "base/i40e_adminq_cmd.h"
34 #include "base/i40e_type.h"
35 #include "base/i40e_register.h"
36 #include "base/i40e_dcb.h"
37 #include "i40e_ethdev.h"
38 #include "i40e_rxtx.h"
39 #include "i40e_pf.h"
40 #include "i40e_regs.h"
41 #include "rte_pmd_i40e.h"
42
43 #define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
44 #define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
45 #define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
46 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG   "queue-num-per-vf"
47 #define ETH_I40E_USE_LATEST_VEC         "use-latest-supported-vec"
48 #define ETH_I40E_VF_MSG_CFG             "vf_msg_cfg"
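
/*
 * Usage note (illustrative, not part of the driver): these keys are
 * consumed from the per-device devargs string passed to EAL, e.g.
 *   -a 0000:02:00.0,support-multi-driver=1,queue-num-per-vf=8
 * The helpers below parse them with the rte_kvargs API.
 */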
49
50 #define I40E_CLEAR_PXE_WAIT_MS     200
51 #define I40E_VSI_TSR_QINQ_STRIP         0x4010
52 #define I40E_VSI_TSR(_i)        (0x00050800 + ((_i) * 4))
53
54 /* Maximum number of capability elements */
55 #define I40E_MAX_CAP_ELE_NUM       128
56
57 /* Wait count and interval */
58 #define I40E_CHK_Q_ENA_COUNT       1000
59 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
60
61 /* Maximum number of VSIs */
62 #define I40E_MAX_NUM_VSIS          (384UL)
63
64 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
65
66 /* Flow control default timer */
67 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
68
69 /* Flow control enable fwd bit */
70 #define I40E_PRTMAC_FWD_CTRL   0x00000001
71
72 /* Receive Packet Buffer size */
73 #define I40E_RXPBSIZE (968 * 1024)
74
75 /* Kilobytes shift */
76 #define I40E_KILOSHIFT 10
77
78 /* Flow control default high water */
79 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
80
81 /* Flow control default low water */
82 #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
83
84 /* Receive average packet size in bytes */
85 #define I40E_PACKET_AVERAGE_SIZE 128
86
87 /* Mask of PF interrupt causes */
88 #define I40E_PFINT_ICR0_ENA_MASK ( \
89                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
90                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
91                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
92                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
93                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
94                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
95                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
96                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
97                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
98
99 #define I40E_FLOW_TYPES ( \
100         (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
101         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
102         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
103         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
104         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
105         (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
106         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
107         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
108         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
109         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
110         (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
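
/*
 * Hypothetical helper (a sketch, not part of the driver) showing how the
 * I40E_FLOW_TYPES mask above is meant to be consumed: each RTE_ETH_FLOW_*
 * value indexes one bit, so a flow type is supported iff its bit is set.
 */
static inline bool
i40e_flow_type_is_supported(uint32_t flow_type)
{
        return flow_type < 32 &&
               (I40E_FLOW_TYPES & (1UL << flow_type)) != 0;
}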
111
112 /* Additional timesync values. */
113 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
114 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
115 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
116 #define I40E_PRTTSYN_TSYNENA     0x80000000
117 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
118 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
119
120 /**
121  * Below are values for writing non-exposed registers,
122  * as suggested by silicon experts.
123  */
124 /* Destination MAC address */
125 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
126 /* Source MAC address */
127 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
128 /* Outer (S-Tag) VLAN tag in the outer L2 header */
129 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
130 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
131 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
132 /* Single VLAN tag in the inner L2 header */
133 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
134 /* Source IPv4 address */
135 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
136 /* Destination IPv4 address */
137 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
138 /* Source IPv4 address for X722 */
139 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
140 /* Destination IPv4 address for X722 */
141 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
142 /* IPv4 Protocol for X722 */
143 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
144 /* IPv4 Time to Live for X722 */
145 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
146 /* IPv4 Type of Service (TOS) */
147 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
148 /* IPv4 Protocol */
149 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
150 /* IPv4 Time to Live */
151 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
152 /* Source IPv6 address */
153 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
154 /* Destination IPv6 address */
155 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
156 /* IPv6 Traffic Class (TC) */
157 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
158 /* IPv6 Next Header */
159 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
160 /* IPv6 Hop Limit */
161 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
162 /* Source L4 port */
163 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
164 /* Destination L4 port */
165 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
166 /* SCTP verification tag */
167 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
168 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
169 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
170 /* Source port of tunneling UDP */
171 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
172 /* Destination port of tunneling UDP */
173 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
174 /* UDP Tunneling ID, NVGRE/GRE key */
175 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
176 /* Last ether type */
177 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
178 /* Tunneling outer destination IPv4 address */
179 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
180 /* Tunneling outer destination IPv6 address */
181 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
182 /* 1st word of flex payload */
183 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
184 /* 2nd word of flex payload */
185 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
186 /* 3rd word of flex payload */
187 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
188 /* 4th word of flex payload */
189 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
190 /* 5th word of flex payload */
191 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
192 /* 6th word of flex payload */
193 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
194 /* 7th word of flex payload */
195 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
196 /* 8th word of flex payload */
197 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
198 /* All 8 words of flex payload */
199 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
200 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
201
202 #define I40E_TRANSLATE_INSET 0
203 #define I40E_TRANSLATE_REG   1
204
205 #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
206 #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
207 #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
208 #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
209 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
210 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
211
212 /* PCI offset for querying capability */
213 #define PCI_DEV_CAP_REG            0xA4
214 /* PCI offset for enabling/disabling Extended Tag */
215 #define PCI_DEV_CTRL_REG           0xA8
216 /* Bit mask of Extended Tag capability */
217 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
218 /* Bit shift of Extended Tag enable/disable */
219 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
220 /* Bit mask of Extended Tag enable/disable */
221 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
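
/*
 * Condensed sketch of how the offsets above are used (assumption: the full
 * logic lives in i40e_enable_extended_tag() further down in this file):
 * read the capability register, and if Extended Tag is supported but not
 * yet enabled, set the enable bit via a read-modify-write of PCI config
 * space.
 */
static void
sketch_enable_extended_tag(struct rte_pci_device *pci_dev)
{
        uint32_t buf = 0;

        if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
                                PCI_DEV_CAP_REG) < 0 ||
            !(buf & PCI_DEV_CAP_EXT_TAG_MASK))
                return; /* config space unreadable or no Extended Tag */

        buf = 0;
        if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
                                PCI_DEV_CTRL_REG) < 0)
                return;
        if (buf & PCI_DEV_CTRL_EXT_TAG_MASK)
                return; /* already enabled */
        buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
        rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
}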
222
223 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
224 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
225 static int i40e_dev_configure(struct rte_eth_dev *dev);
226 static int i40e_dev_start(struct rte_eth_dev *dev);
227 static void i40e_dev_stop(struct rte_eth_dev *dev);
228 static int i40e_dev_close(struct rte_eth_dev *dev);
229 static int i40e_dev_reset(struct rte_eth_dev *dev);
230 static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
231 static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
232 static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
233 static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
234 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
235 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
236 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
237                                struct rte_eth_stats *stats);
238 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
239                                struct rte_eth_xstat *xstats, unsigned n);
240 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
241                                      struct rte_eth_xstat_name *xstats_names,
242                                      unsigned limit);
243 static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
244 static int i40e_fw_version_get(struct rte_eth_dev *dev,
245                                 char *fw_version, size_t fw_size);
246 static int i40e_dev_info_get(struct rte_eth_dev *dev,
247                              struct rte_eth_dev_info *dev_info);
248 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
249                                 uint16_t vlan_id,
250                                 int on);
251 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
252                               enum rte_vlan_type vlan_type,
253                               uint16_t tpid);
254 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
255 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
256                                       uint16_t queue,
257                                       int on);
258 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
259 static int i40e_dev_led_on(struct rte_eth_dev *dev);
260 static int i40e_dev_led_off(struct rte_eth_dev *dev);
261 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
262                               struct rte_eth_fc_conf *fc_conf);
263 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
264                               struct rte_eth_fc_conf *fc_conf);
265 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
266                                        struct rte_eth_pfc_conf *pfc_conf);
267 static int i40e_macaddr_add(struct rte_eth_dev *dev,
268                             struct rte_ether_addr *mac_addr,
269                             uint32_t index,
270                             uint32_t pool);
271 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
272 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
273                                     struct rte_eth_rss_reta_entry64 *reta_conf,
274                                     uint16_t reta_size);
275 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
276                                    struct rte_eth_rss_reta_entry64 *reta_conf,
277                                    uint16_t reta_size);
278
279 static int i40e_get_cap(struct i40e_hw *hw);
280 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
281 static int i40e_pf_setup(struct i40e_pf *pf);
282 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
283 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
284 static int i40e_dcb_setup(struct rte_eth_dev *dev);
285 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
286                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
287 static void i40e_stat_update_48(struct i40e_hw *hw,
288                                uint32_t hireg,
289                                uint32_t loreg,
290                                bool offset_loaded,
291                                uint64_t *offset,
292                                uint64_t *stat);
293 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
294 static void i40e_dev_interrupt_handler(void *param);
295 static void i40e_dev_alarm_handler(void *param);
296 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
297                                 uint32_t base, uint32_t num);
298 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
299 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
300                         uint32_t base);
301 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
302                         uint16_t num);
303 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
304 static int i40e_veb_release(struct i40e_veb *veb);
305 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
306                                                 struct i40e_vsi *vsi);
307 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
308 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
309                                              struct i40e_macvlan_filter *mv_f,
310                                              int num,
311                                              uint16_t vlan);
312 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
313 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
314                                     struct rte_eth_rss_conf *rss_conf);
315 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
316                                       struct rte_eth_rss_conf *rss_conf);
317 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
318                                         struct rte_eth_udp_tunnel *udp_tunnel);
319 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
320                                         struct rte_eth_udp_tunnel *udp_tunnel);
321 static void i40e_filter_input_set_init(struct i40e_pf *pf);
322 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
323                                 enum rte_filter_op filter_op,
324                                 void *arg);
325 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
326                                 enum rte_filter_type filter_type,
327                                 enum rte_filter_op filter_op,
328                                 void *arg);
329 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
330                                   struct rte_eth_dcb_info *dcb_info);
331 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
332 static void i40e_configure_registers(struct i40e_hw *hw);
333 static void i40e_hw_init(struct rte_eth_dev *dev);
334 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
335 static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
336                                                      uint16_t seid,
337                                                      uint16_t rule_type,
338                                                      uint16_t *entries,
339                                                      uint16_t count,
340                                                      uint16_t rule_id);
341 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
342                         struct rte_eth_mirror_conf *mirror_conf,
343                         uint8_t sw_id, uint8_t on);
344 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
345
346 static int i40e_timesync_enable(struct rte_eth_dev *dev);
347 static int i40e_timesync_disable(struct rte_eth_dev *dev);
348 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
349                                            struct timespec *timestamp,
350                                            uint32_t flags);
351 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
352                                            struct timespec *timestamp);
353 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
354
355 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
356
357 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
358                                    struct timespec *timestamp);
359 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
360                                     const struct timespec *timestamp);
361
362 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
363                                          uint16_t queue_id);
364 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
365                                           uint16_t queue_id);
366
367 static int i40e_get_regs(struct rte_eth_dev *dev,
368                          struct rte_dev_reg_info *regs);
369
370 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
371
372 static int i40e_get_eeprom(struct rte_eth_dev *dev,
373                            struct rte_dev_eeprom_info *eeprom);
374
375 static int i40e_get_module_info(struct rte_eth_dev *dev,
376                                 struct rte_eth_dev_module_info *modinfo);
377 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
378                                   struct rte_dev_eeprom_info *info);
379
380 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
381                                       struct rte_ether_addr *mac_addr);
382
383 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
384
385 static int i40e_ethertype_filter_convert(
386         const struct rte_eth_ethertype_filter *input,
387         struct i40e_ethertype_filter *filter);
388 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
389                                    struct i40e_ethertype_filter *filter);
390
391 static int i40e_tunnel_filter_convert(
392         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
393         struct i40e_tunnel_filter *tunnel_filter);
394 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
395                                 struct i40e_tunnel_filter *tunnel_filter);
396 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
397
398 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
399 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
400 static void i40e_filter_restore(struct i40e_pf *pf);
401 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
402 static int i40e_pf_config_rss(struct i40e_pf *pf);
403
404 static const char *const valid_keys[] = {
405         ETH_I40E_FLOATING_VEB_ARG,
406         ETH_I40E_FLOATING_VEB_LIST_ARG,
407         ETH_I40E_SUPPORT_MULTI_DRIVER,
408         ETH_I40E_QUEUE_NUM_PER_VF_ARG,
409         ETH_I40E_USE_LATEST_VEC,
410         ETH_I40E_VF_MSG_CFG,
411         NULL};
412
413 static const struct rte_pci_id pci_id_i40e_map[] = {
414         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
415         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
416         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
417         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
418         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
419         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
420         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
421         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
422         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
423         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
424         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
425         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
426         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
427         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
428         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
429         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
430         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
431         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
432         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
433         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
434         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
435         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
436         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
437         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
438         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
439         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
440         { .vendor_id = 0, /* sentinel */ },
441 };
442
443 static const struct eth_dev_ops i40e_eth_dev_ops = {
444         .dev_configure                = i40e_dev_configure,
445         .dev_start                    = i40e_dev_start,
446         .dev_stop                     = i40e_dev_stop,
447         .dev_close                    = i40e_dev_close,
448         .dev_reset                    = i40e_dev_reset,
449         .promiscuous_enable           = i40e_dev_promiscuous_enable,
450         .promiscuous_disable          = i40e_dev_promiscuous_disable,
451         .allmulticast_enable          = i40e_dev_allmulticast_enable,
452         .allmulticast_disable         = i40e_dev_allmulticast_disable,
453         .dev_set_link_up              = i40e_dev_set_link_up,
454         .dev_set_link_down            = i40e_dev_set_link_down,
455         .link_update                  = i40e_dev_link_update,
456         .stats_get                    = i40e_dev_stats_get,
457         .xstats_get                   = i40e_dev_xstats_get,
458         .xstats_get_names             = i40e_dev_xstats_get_names,
459         .stats_reset                  = i40e_dev_stats_reset,
460         .xstats_reset                 = i40e_dev_stats_reset,
461         .fw_version_get               = i40e_fw_version_get,
462         .dev_infos_get                = i40e_dev_info_get,
463         .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
464         .vlan_filter_set              = i40e_vlan_filter_set,
465         .vlan_tpid_set                = i40e_vlan_tpid_set,
466         .vlan_offload_set             = i40e_vlan_offload_set,
467         .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
468         .vlan_pvid_set                = i40e_vlan_pvid_set,
469         .rx_queue_start               = i40e_dev_rx_queue_start,
470         .rx_queue_stop                = i40e_dev_rx_queue_stop,
471         .tx_queue_start               = i40e_dev_tx_queue_start,
472         .tx_queue_stop                = i40e_dev_tx_queue_stop,
473         .rx_queue_setup               = i40e_dev_rx_queue_setup,
474         .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
475         .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
476         .rx_queue_release             = i40e_dev_rx_queue_release,
477         .tx_queue_setup               = i40e_dev_tx_queue_setup,
478         .tx_queue_release             = i40e_dev_tx_queue_release,
479         .dev_led_on                   = i40e_dev_led_on,
480         .dev_led_off                  = i40e_dev_led_off,
481         .flow_ctrl_get                = i40e_flow_ctrl_get,
482         .flow_ctrl_set                = i40e_flow_ctrl_set,
483         .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
484         .mac_addr_add                 = i40e_macaddr_add,
485         .mac_addr_remove              = i40e_macaddr_remove,
486         .reta_update                  = i40e_dev_rss_reta_update,
487         .reta_query                   = i40e_dev_rss_reta_query,
488         .rss_hash_update              = i40e_dev_rss_hash_update,
489         .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
490         .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
491         .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
492         .filter_ctrl                  = i40e_dev_filter_ctrl,
493         .rxq_info_get                 = i40e_rxq_info_get,
494         .txq_info_get                 = i40e_txq_info_get,
495         .rx_burst_mode_get            = i40e_rx_burst_mode_get,
496         .tx_burst_mode_get            = i40e_tx_burst_mode_get,
497         .mirror_rule_set              = i40e_mirror_rule_set,
498         .mirror_rule_reset            = i40e_mirror_rule_reset,
499         .timesync_enable              = i40e_timesync_enable,
500         .timesync_disable             = i40e_timesync_disable,
501         .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
502         .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
503         .get_dcb_info                 = i40e_dev_get_dcb_info,
504         .timesync_adjust_time         = i40e_timesync_adjust_time,
505         .timesync_read_time           = i40e_timesync_read_time,
506         .timesync_write_time          = i40e_timesync_write_time,
507         .get_reg                      = i40e_get_regs,
508         .get_eeprom_length            = i40e_get_eeprom_length,
509         .get_eeprom                   = i40e_get_eeprom,
510         .get_module_info              = i40e_get_module_info,
511         .get_module_eeprom            = i40e_get_module_eeprom,
512         .mac_addr_set                 = i40e_set_default_mac_addr,
513         .mtu_set                      = i40e_dev_mtu_set,
514         .tm_ops_get                   = i40e_tm_ops_get,
515         .tx_done_cleanup              = i40e_tx_done_cleanup,
516 };
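
/*
 * Illustrative only: the generic ethdev API dispatches into the ops table
 * above, roughly as below (simplified from lib/librte_ethdev; validation
 * and error handling omitted).
 */
static int
sketch_eth_dev_start(struct rte_eth_dev *dev)
{
        return (*dev->dev_ops->dev_start)(dev); /* resolves to i40e_dev_start */
}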
517
518 /* store statistics names and their offsets in the stats structure */
519 struct rte_i40e_xstats_name_off {
520         char name[RTE_ETH_XSTATS_NAME_SIZE];
521         unsigned offset;
522 };
523
524 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
525         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
526         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
527         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
528         {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
529         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
530                 rx_unknown_protocol)},
531         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
532         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
533         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
534         {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
535 };
536
537 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
538                 sizeof(rte_i40e_stats_strings[0]))
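
/*
 * Sketch (not part of the driver) of how these name/offset tables are
 * consumed when filling xstats: the stored offset indexes into the stats
 * structure, so the counter is fetched with plain pointer arithmetic, as
 * i40e_dev_xstats_get() does further down.
 */
static inline uint64_t
sketch_read_eth_xstat(const struct i40e_eth_stats *stats, unsigned int idx)
{
        return *(const uint64_t *)((const char *)stats +
                                   rte_i40e_stats_strings[idx].offset);
}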
539
540 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
541         {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
542                 tx_dropped_link_down)},
543         {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
544         {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
545                 illegal_bytes)},
546         {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
547         {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
548                 mac_local_faults)},
549         {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
550                 mac_remote_faults)},
551         {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
552                 rx_length_errors)},
553         {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
554         {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
555         {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
556         {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
557         {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
558         {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
559                 rx_size_127)},
560         {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
561                 rx_size_255)},
562         {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
563                 rx_size_511)},
564         {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
565                 rx_size_1023)},
566         {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
567                 rx_size_1522)},
568         {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
569                 rx_size_big)},
570         {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
571                 rx_undersize)},
572         {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
573                 rx_oversize)},
574         {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
575                 mac_short_packet_dropped)},
576         {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
577                 rx_fragments)},
578         {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
579         {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
580         {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
581                 tx_size_127)},
582         {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
583                 tx_size_255)},
584         {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
585                 tx_size_511)},
586         {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
587                 tx_size_1023)},
588         {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
589                 tx_size_1522)},
590         {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
591                 tx_size_big)},
592         {"rx_flow_director_atr_match_packets",
593                 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
594         {"rx_flow_director_sb_match_packets",
595                 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
596         {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
597                 tx_lpi_status)},
598         {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
599                 rx_lpi_status)},
600         {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
601                 tx_lpi_count)},
602         {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
603                 rx_lpi_count)},
604 };
605
606 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
607                 sizeof(rte_i40e_hw_port_strings[0]))
608
609 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
610         {"xon_packets", offsetof(struct i40e_hw_port_stats,
611                 priority_xon_rx)},
612         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
613                 priority_xoff_rx)},
614 };
615
616 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
617                 sizeof(rte_i40e_rxq_prio_strings[0]))
618
619 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
620         {"xon_packets", offsetof(struct i40e_hw_port_stats,
621                 priority_xon_tx)},
622         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
623                 priority_xoff_tx)},
624         {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
625                 priority_xon_2_xoff)},
626 };
627
628 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
629                 sizeof(rte_i40e_txq_prio_strings[0]))
630
631 static int
632 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
633         struct rte_pci_device *pci_dev)
634 {
635         char name[RTE_ETH_NAME_MAX_LEN];
636         struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
637         int i, retval;
638
639         if (pci_dev->device.devargs) {
640                 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
641                                 &eth_da);
642                 if (retval)
643                         return retval;
644         }
645
646         retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
647                 sizeof(struct i40e_adapter),
648                 eth_dev_pci_specific_init, pci_dev,
649                 eth_i40e_dev_init, NULL);
650
651         if (retval || eth_da.nb_representor_ports < 1)
652                 return retval;
653
654         /* probe VF representor ports */
655         struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
656                 pci_dev->device.name);
657
658         if (pf_ethdev == NULL)
659                 return -ENODEV;
660
661         for (i = 0; i < eth_da.nb_representor_ports; i++) {
662                 struct i40e_vf_representor representor = {
663                         .vf_id = eth_da.representor_ports[i],
664                         .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
665                                 pf_ethdev->data->dev_private)->switch_domain_id,
666                         .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
667                                 pf_ethdev->data->dev_private)
668                 };
669
670                 /* representor port name: net_<pci_bdf>_representor_<vf_id> */
671                 snprintf(name, sizeof(name), "net_%s_representor_%d",
672                         pci_dev->device.name, eth_da.representor_ports[i]);
673
674                 retval = rte_eth_dev_create(&pci_dev->device, name,
675                         sizeof(struct i40e_vf_representor), NULL, NULL,
676                         i40e_vf_representor_init, &representor);
677
678                 if (retval)
679                         PMD_DRV_LOG(ERR, "failed to create i40e vf "
680                                 "representor %s.", name);
681         }
682
683         return 0;
684 }
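
/*
 * Usage note (illustrative): a devargs string such as
 *   -a 0000:02:00.0,representor=[0-2]
 * makes rte_eth_devargs_parse() fill eth_da above, and the loop then
 * creates ports named net_0000:02:00.0_representor_0 .. _2.
 */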
685
686 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
687 {
688         struct rte_eth_dev *ethdev;
689
690         ethdev = rte_eth_dev_allocated(pci_dev->device.name);
691         if (!ethdev)
692                 return 0;
693
694         if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
695                 return rte_eth_dev_pci_generic_remove(pci_dev,
696                                         i40e_vf_representor_uninit);
697         else
698                 return rte_eth_dev_pci_generic_remove(pci_dev,
699                                                 eth_i40e_dev_uninit);
700 }
701
702 static struct rte_pci_driver rte_i40e_pmd = {
703         .id_table = pci_id_i40e_map,
704         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
705         .probe = eth_i40e_pci_probe,
706         .remove = eth_i40e_pci_remove,
707 };
708
709 static inline void
710 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
711                          uint32_t reg_val)
712 {
713         uint32_t ori_reg_val;
714         struct rte_eth_dev *dev;
715
716         ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
717         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
718         i40e_write_rx_ctl(hw, reg_addr, reg_val);
719         if (ori_reg_val != reg_val)
720                 PMD_DRV_LOG(WARNING,
721                             "i40e device %s changed global register [0x%08x]."
722                             " original: 0x%08x, new: 0x%08x",
723                             dev->device->name, reg_addr, ori_reg_val, reg_val);
724 }
725
726 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
727 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
728 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
729
730 #ifndef I40E_GLQF_ORT
731 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
732 #endif
733 #ifndef I40E_GLQF_PIT
734 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
735 #endif
736 #ifndef I40E_GLQF_L3_MAP
737 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
738 #endif
739
740 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
741 {
742         /*
743          * Initialize registers for parsing the packet type of QinQ frames.
744          * This should be removed from the code once a proper configuration
745          * API is added, to avoid configuration conflicts between ports of
746          * the same device.
747          */
748         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
749         I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
750 }
751
752 static inline void i40e_config_automask(struct i40e_pf *pf)
753 {
754         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
755         uint32_t val;
756
757         /* INTENA flag is not auto-cleared for interrupt */
758         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
759         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
760                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
761
762         /* If multi-driver support is enabled, the PF will use INT0. */
763         if (!pf->support_multi_driver)
764                 val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
765
766         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
767 }
768
769 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
770
771 /*
772  * Add an ethertype filter to drop all flow control frames transmitted
773  * from VSIs.
774  */
775 static void
776 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
777 {
778         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
779         uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
780                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
781                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
782         int ret;
783
784         ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
785                                 I40E_FLOW_CONTROL_ETHERTYPE, flags,
786                                 pf->main_vsi_seid, 0,
787                                 TRUE, NULL, NULL);
788         if (ret)
789                 PMD_INIT_LOG(ERR,
790                         "Failed to add filter to drop flow control frames from VSIs.");
791 }
792
793 static int
794 floating_veb_list_handler(__rte_unused const char *key,
795                           const char *floating_veb_value,
796                           void *opaque)
797 {
798         int idx = 0;
799         unsigned int count = 0;
800         char *end = NULL;
801         int min, max;
802         bool *vf_floating_veb = opaque;
803
804         while (isblank(*floating_veb_value))
805                 floating_veb_value++;
806
807         /* Reset floating VEB configuration for VFs */
808         for (idx = 0; idx < I40E_MAX_VF; idx++)
809                 vf_floating_veb[idx] = false;
810
811         min = I40E_MAX_VF;
812         do {
813                 while (isblank(*floating_veb_value))
814                         floating_veb_value++;
815                 if (*floating_veb_value == '\0')
816                         return -1;
817                 errno = 0;
818                 idx = strtoul(floating_veb_value, &end, 10);
819                 if (errno || end == NULL)
820                         return -1;
821                 while (isblank(*end))
822                         end++;
823                 if (*end == '-') {
824                         min = idx;
825                 } else if ((*end == ';') || (*end == '\0')) {
826                         max = idx;
827                         if (min == I40E_MAX_VF)
828                                 min = idx;
829                         if (max >= I40E_MAX_VF)
830                                 max = I40E_MAX_VF - 1;
831                         for (idx = min; idx <= max; idx++) {
832                                 vf_floating_veb[idx] = true;
833                                 count++;
834                         }
835                         min = I40E_MAX_VF;
836                 } else {
837                         return -1;
838                 }
839                 floating_veb_value = end + 1;
840         } while (*end != '\0');
841
842         if (count == 0)
843                 return -1;
844
845         return 0;
846 }
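
/*
 * Example (derived from the parser above): floating_veb_list=0;3-5 marks
 * VFs 0, 3, 4 and 5 for the floating VEB; entries are separated by ';'
 * and a range is written with '-'.
 */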
847
848 static void
849 config_vf_floating_veb(struct rte_devargs *devargs,
850                        uint16_t floating_veb,
851                        bool *vf_floating_veb)
852 {
853         struct rte_kvargs *kvlist;
854         int i;
855         const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
856
857         if (!floating_veb)
858                 return;
859         /* All the VFs attach to the floating VEB by default
860          * when the floating VEB is enabled.
861          */
862         for (i = 0; i < I40E_MAX_VF; i++)
863                 vf_floating_veb[i] = true;
864
865         if (devargs == NULL)
866                 return;
867
868         kvlist = rte_kvargs_parse(devargs->args, valid_keys);
869         if (kvlist == NULL)
870                 return;
871
872         if (!rte_kvargs_count(kvlist, floating_veb_list)) {
873                 rte_kvargs_free(kvlist);
874                 return;
875         }
876         /* When the floating_veb_list parameter exists, all the VFs
877          * attach to the legacy VEB first, and are then moved to the
878          * floating VEB according to the floating_veb_list.
879          */
880         if (rte_kvargs_process(kvlist, floating_veb_list,
881                                floating_veb_list_handler,
882                                vf_floating_veb) < 0) {
883                 rte_kvargs_free(kvlist);
884                 return;
885         }
886         rte_kvargs_free(kvlist);
887 }
888
889 static int
890 i40e_check_floating_handler(__rte_unused const char *key,
891                             const char *value,
892                             __rte_unused void *opaque)
893 {
894         if (strcmp(value, "1"))
895                 return -1;
896
897         return 0;
898 }
899
900 static int
901 is_floating_veb_supported(struct rte_devargs *devargs)
902 {
903         struct rte_kvargs *kvlist;
904         const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
905
906         if (devargs == NULL)
907                 return 0;
908
909         kvlist = rte_kvargs_parse(devargs->args, valid_keys);
910         if (kvlist == NULL)
911                 return 0;
912
913         if (!rte_kvargs_count(kvlist, floating_veb_key)) {
914                 rte_kvargs_free(kvlist);
915                 return 0;
916         }
917         /* Floating VEB is enabled when the key-value pair
918          * enable_floating_veb=1 is present.
919          */
920         if (rte_kvargs_process(kvlist, floating_veb_key,
921                                i40e_check_floating_handler, NULL) < 0) {
922                 rte_kvargs_free(kvlist);
923                 return 0;
924         }
925         rte_kvargs_free(kvlist);
926
927         return 1;
928 }
929
930 static void
931 config_floating_veb(struct rte_eth_dev *dev)
932 {
933         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
934         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
935         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
936
937         memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
938
939         if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
940                 pf->floating_veb =
941                         is_floating_veb_supported(pci_dev->device.devargs);
942                 config_vf_floating_veb(pci_dev->device.devargs,
943                                        pf->floating_veb,
944                                        pf->floating_veb_list);
945         } else {
946                 pf->floating_veb = false;
947         }
948 }
949
950 #define I40E_L2_TAGS_S_TAG_SHIFT 1
951 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
952
953 static int
954 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
955 {
956         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
957         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
958         char ethertype_hash_name[RTE_HASH_NAMESIZE];
959         int ret;
960
961         struct rte_hash_parameters ethertype_hash_params = {
962                 .name = ethertype_hash_name,
963                 .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
964                 .key_len = sizeof(struct i40e_ethertype_filter_input),
965                 .hash_func = rte_hash_crc,
966                 .hash_func_init_val = 0,
967                 .socket_id = rte_socket_id(),
968         };
969
970         /* Initialize ethertype filter rule list and hash */
971         TAILQ_INIT(&ethertype_rule->ethertype_list);
972         snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
973                  "ethertype_%s", dev->device->name);
974         ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
975         if (!ethertype_rule->hash_table) {
976                 PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
977                 return -EINVAL;
978         }
979         ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
980                                        sizeof(struct i40e_ethertype_filter *) *
981                                        I40E_MAX_ETHERTYPE_FILTER_NUM,
982                                        0);
983         if (!ethertype_rule->hash_map) {
984                 PMD_INIT_LOG(ERR,
985                              "Failed to allocate memory for ethertype hash map!");
986                 ret = -ENOMEM;
987                 goto err_ethertype_hash_map_alloc;
988         }
989
990         return 0;
991
992 err_ethertype_hash_map_alloc:
993         rte_hash_free(ethertype_rule->hash_table);
994
995         return ret;
996 }
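
/*
 * Sketch of the pattern shared by the three filter stores initialized in
 * this file (assumption: condensed from i40e_sw_ethertype_filter_insert()
 * declared above; the TAILQ entry field name "rules" follows the driver's
 * header): the key goes into the rte_hash, the returned slot indexes
 * hash_map, and the filter is also linked on the TAILQ so rules can be
 * walked in insertion order.
 */
static int
sketch_ethertype_filter_insert(struct i40e_ethertype_rule *rule,
                               struct i40e_ethertype_filter *filter)
{
        int pos = rte_hash_add_key(rule->hash_table, &filter->input);

        if (pos < 0)
                return pos;
        rule->hash_map[pos] = filter;
        TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
        return 0;
}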
997
998 static int
999 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
1000 {
1001         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1002         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1003         char tunnel_hash_name[RTE_HASH_NAMESIZE];
1004         int ret;
1005
1006         struct rte_hash_parameters tunnel_hash_params = {
1007                 .name = tunnel_hash_name,
1008                 .entries = I40E_MAX_TUNNEL_FILTER_NUM,
1009                 .key_len = sizeof(struct i40e_tunnel_filter_input),
1010                 .hash_func = rte_hash_crc,
1011                 .hash_func_init_val = 0,
1012                 .socket_id = rte_socket_id(),
1013         };
1014
1015         /* Initialize tunnel filter rule list and hash */
1016         TAILQ_INIT(&tunnel_rule->tunnel_list);
1017         snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
1018                  "tunnel_%s", dev->device->name);
1019         tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
1020         if (!tunnel_rule->hash_table) {
1021                 PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
1022                 return -EINVAL;
1023         }
1024         tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
1025                                     sizeof(struct i40e_tunnel_filter *) *
1026                                     I40E_MAX_TUNNEL_FILTER_NUM,
1027                                     0);
1028         if (!tunnel_rule->hash_map) {
1029                 PMD_INIT_LOG(ERR,
1030                              "Failed to allocate memory for tunnel hash map!");
1031                 ret = -ENOMEM;
1032                 goto err_tunnel_hash_map_alloc;
1033         }
1034
1035         return 0;
1036
1037 err_tunnel_hash_map_alloc:
1038         rte_hash_free(tunnel_rule->hash_table);
1039
1040         return ret;
1041 }
1042
1043 static int
1044 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
1045 {
1046         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1047         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1048         struct i40e_fdir_info *fdir_info = &pf->fdir;
1049         char fdir_hash_name[RTE_HASH_NAMESIZE];
1050         uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
1051         uint32_t best = hw->func_caps.fd_filters_best_effort;
1052         struct rte_bitmap *bmp = NULL;
1053         uint32_t bmp_size;
1054         void *mem = NULL;
1055         uint32_t i = 0;
1056         int ret;
1057
1058         struct rte_hash_parameters fdir_hash_params = {
1059                 .name = fdir_hash_name,
1060                 .entries = I40E_MAX_FDIR_FILTER_NUM,
1061                 .key_len = sizeof(struct i40e_fdir_input),
1062                 .hash_func = rte_hash_crc,
1063                 .hash_func_init_val = 0,
1064                 .socket_id = rte_socket_id(),
1065         };
1066
1067         /* Initialize flow director filter rule list and hash */
1068         TAILQ_INIT(&fdir_info->fdir_list);
1069         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1070                  "fdir_%s", dev->device->name);
1071         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1072         if (!fdir_info->hash_table) {
1073                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1074                 return -EINVAL;
1075         }
1076
1077         fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1078                                           sizeof(struct i40e_fdir_filter *) *
1079                                           I40E_MAX_FDIR_FILTER_NUM,
1080                                           0);
1081         if (!fdir_info->hash_map) {
1082                 PMD_INIT_LOG(ERR,
1083                              "Failed to allocate memory for fdir hash map!");
1084                 ret = -ENOMEM;
1085                 goto err_fdir_hash_map_alloc;
1086         }
1087
1088         fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
1089                         sizeof(struct i40e_fdir_filter) *
1090                         I40E_MAX_FDIR_FILTER_NUM,
1091                         0);
1092
1093         if (!fdir_info->fdir_filter_array) {
1094                 PMD_INIT_LOG(ERR,
1095                              "Failed to allocate memory for fdir filter array!");
1096                 ret = -ENOMEM;
1097                 goto err_fdir_filter_array_alloc;
1098         }
1099
1100         fdir_info->fdir_space_size = alloc + best;
1101         fdir_info->fdir_actual_cnt = 0;
1102         fdir_info->fdir_guarantee_total_space = alloc;
1103         fdir_info->fdir_guarantee_free_space =
1104                 fdir_info->fdir_guarantee_total_space;
1105
1106         PMD_DRV_LOG(INFO, "FDIR guaranteed space: %u, best-effort space: %u.", alloc, best);
1107
1108         fdir_info->fdir_flow_pool.pool =
1109                         rte_zmalloc("i40e_fdir_entry",
1110                                 sizeof(struct i40e_fdir_entry) *
1111                                 fdir_info->fdir_space_size,
1112                                 0);
1113
1114         if (!fdir_info->fdir_flow_pool.pool) {
1115                 PMD_INIT_LOG(ERR,
1116                              "Failed to allocate memory for bitmap flow!");
1117                 ret = -ENOMEM;
1118                 goto err_fdir_bitmap_flow_alloc;
1119         }
1120
1121         for (i = 0; i < fdir_info->fdir_space_size; i++)
1122                 fdir_info->fdir_flow_pool.pool[i].idx = i;
1123
1124         bmp_size =
1125                 rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
1126         mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
1127         if (mem == NULL) {
1128                 PMD_INIT_LOG(ERR,
1129                              "Failed to allocate memory for fdir bitmap!");
1130                 ret = -ENOMEM;
1131                 goto err_fdir_mem_alloc;
1132         }
1133         bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
1134         if (bmp == NULL) {
1135                 PMD_INIT_LOG(ERR,
1136                              "Failed to initialization fdir bitmap!");
1137                 ret = -ENOMEM;
1138                 goto err_fdir_bmp_alloc;
1139         }
1140         for (i = 0; i < fdir_info->fdir_space_size; i++)
1141                 rte_bitmap_set(bmp, i);
1142
1143         fdir_info->fdir_flow_pool.bitmap = bmp;
1144
1145         return 0;
1146
1147 err_fdir_bmp_alloc:
1148         rte_free(mem);
1149 err_fdir_mem_alloc:
1150         rte_free(fdir_info->fdir_flow_pool.pool);
1151 err_fdir_bitmap_flow_alloc:
1152         rte_free(fdir_info->fdir_filter_array);
1153 err_fdir_filter_array_alloc:
1154         rte_free(fdir_info->hash_map);
1155 err_fdir_hash_map_alloc:
1156         rte_hash_free(fdir_info->hash_table);
1157
1158         return ret;
1159 }
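
/*
 * Sketch (assumption: condensed from the driver's fdir flow pool helpers)
 * of how the bitmap built above hands out entries: scan for a set bit,
 * clear it to mark the slot busy, and return the matching pre-indexed
 * entry from the pool.
 */
static struct i40e_fdir_entry *
sketch_fdir_entry_get(struct i40e_fdir_info *fdir_info)
{
        uint64_t slab = 0;
        uint32_t pos = 0;

        if (rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos, &slab) == 0)
                return NULL; /* pool exhausted */
        pos += rte_bsf64(slab);
        rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
        return &fdir_info->fdir_flow_pool.pool[pos];
}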
1160
1161 static void
1162 i40e_init_customized_info(struct i40e_pf *pf)
1163 {
1164         int i;
1165
1166         /* Initialize customized pctype */
1167         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1168                 pf->customized_pctype[i].index = i;
1169                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1170                 pf->customized_pctype[i].valid = false;
1171         }
1172
1173         pf->gtp_support = false;
1174         pf->esp_support = false;
1175 }
1176
1177 static void
1178 i40e_init_filter_invalidation(struct i40e_pf *pf)
1179 {
1180         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1181         struct i40e_fdir_info *fdir_info = &pf->fdir;
1182         uint32_t glqf_ctl_reg = 0;
1183
1184         glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1185         if (!pf->support_multi_driver) {
1186                 fdir_info->fdir_invalprio = 1;
1187                 glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1188                 PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1189                 i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1190         } else {
1191                 if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1192                         fdir_info->fdir_invalprio = 1;
1193                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1194                 } else {
1195                         fdir_info->fdir_invalprio = 0;
1196                         PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1197                 }
1198         }
1199 }
1200
1201 void
1202 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1203 {
1204         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1206         struct i40e_queue_regions *info = &pf->queue_region;
1207         uint16_t i;
1208
1209         for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1210                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1211
1212         memset(info, 0, sizeof(struct i40e_queue_regions));
1213 }
1214
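/*
 * kvargs handler for ETH_I40E_SUPPORT_MULTI_DRIVER. Example devargs usage
 * (illustrative; the PCI address and EAL option spelling vary by setup):
 *     testpmd -w 84:00.0,support-multi-driver=1 -- -i
 */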
1215 static int
1216 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1217                                const char *value,
1218                                void *opaque)
1219 {
1220         struct i40e_pf *pf;
1221         unsigned long support_multi_driver;
1222         char *end;
1223
1224         pf = (struct i40e_pf *)opaque;
1225
1226         errno = 0;
1227         support_multi_driver = strtoul(value, &end, 10);
1228         if (errno != 0 || end == value || *end != 0) {
1229                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1230                 return -(EINVAL);
1231         }
1232
1233         if (support_multi_driver == 1 || support_multi_driver == 0)
1234                 pf->support_multi_driver = (bool)support_multi_driver;
1235         else
1236                 PMD_DRV_LOG(WARNING,
1237                             "%s must be 1 or 0, enabling global configuration by default.",
1238                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1239         return 0;
1240 }
1241
1242 static int
1243 i40e_support_multi_driver(struct rte_eth_dev *dev)
1244 {
1245         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1246         struct rte_kvargs *kvlist;
1247         int kvargs_count;
1248
1249         /* Enable global configuration by default */
1250         pf->support_multi_driver = false;
1251
1252         if (!dev->device->devargs)
1253                 return 0;
1254
1255         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1256         if (!kvlist)
1257                 return -EINVAL;
1258
1259         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1260         if (!kvargs_count) {
1261                 rte_kvargs_free(kvlist);
1262                 return 0;
1263         }
1264
1265         if (kvargs_count > 1)
1266                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
1267                             "the first invalid or the last valid one is used!",
1268                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1269
1270         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1271                                i40e_parse_multi_drv_handler, pf) < 0) {
1272                 rte_kvargs_free(kvlist);
1273                 return -EINVAL;
1274         }
1275
1276         rte_kvargs_free(kvlist);
1277         return 0;
1278 }
1279
1280 static int
1281 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1282                                     uint32_t reg_addr, uint64_t reg_val,
1283                                     struct i40e_asq_cmd_details *cmd_details)
1284 {
1285         uint64_t ori_reg_val;
1286         struct rte_eth_dev *dev;
1287         int ret;
1288
1289         ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1290         if (ret != I40E_SUCCESS) {
1291                 PMD_DRV_LOG(ERR,
1292                             "Failed to do a debug read from 0x%08x",
1293                             reg_addr);
1294                 return -EIO;
1295         }
1296         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1297
1298         if (ori_reg_val != reg_val)
1299                 PMD_DRV_LOG(WARNING,
1300                             "i40e device %s changed global register [0x%08x]."
1301                             " original: 0x%"PRIx64", after: 0x%"PRIx64,
1302                             dev->device->name, reg_addr, ori_reg_val, reg_val);
1303
1304         return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1305 }
1306
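/*
 * kvargs handler for ETH_I40E_USE_LATEST_VEC. Example devargs usage
 * (illustrative PCI address):
 *     testpmd -w 84:00.0,use-latest-supported-vec=1 -- -i
 */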
1307 static int
1308 i40e_parse_latest_vec_handler(__rte_unused const char *key,
1309                                 const char *value,
1310                                 void *opaque)
1311 {
1312         struct i40e_adapter *ad = opaque;
1313         int use_latest_vec;
1314
1315         use_latest_vec = atoi(value);
1316
1317         if (use_latest_vec != 0 && use_latest_vec != 1)
1318                 PMD_DRV_LOG(WARNING, "Value should be 0 or 1!");
1319
1320         ad->use_latest_vec = (uint8_t)use_latest_vec;
1321
1322         return 0;
1323 }
1324
1325 static int
1326 i40e_use_latest_vec(struct rte_eth_dev *dev)
1327 {
1328         struct i40e_adapter *ad =
1329                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1330         struct rte_kvargs *kvlist;
1331         int kvargs_count;
1332
1333         ad->use_latest_vec = false;
1334
1335         if (!dev->device->devargs)
1336                 return 0;
1337
1338         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1339         if (!kvlist)
1340                 return -EINVAL;
1341
1342         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
1343         if (!kvargs_count) {
1344                 rte_kvargs_free(kvlist);
1345                 return 0;
1346         }
1347
1348         if (kvargs_count > 1)
1349                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
1350                             "the first invalid or the last valid one is used!",
1351                             ETH_I40E_USE_LATEST_VEC);
1352
1353         if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
1354                                 i40e_parse_latest_vec_handler, ad) < 0) {
1355                 rte_kvargs_free(kvlist);
1356                 return -EINVAL;
1357         }
1358
1359         rte_kvargs_free(kvlist);
1360         return 0;
1361 }
1362
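/*
 * kvargs handler for ETH_I40E_VF_MSG_CFG. The value is parsed as
 * "max_msg@period:ignore_second"; e.g. vf_msg_cfg=60@120:180 asks the PF
 * to ignore a VF for 180 seconds once that VF has sent more than 60
 * messages within a 120-second window (enforcement itself lives in the
 * PF host code, not here).
 */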
1363 static int
1364 read_vf_msg_config(__rte_unused const char *key,
1365                                const char *value,
1366                                void *opaque)
1367 {
1368         struct i40e_vf_msg_cfg *cfg = opaque;
1369
1370         if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1371                         &cfg->ignore_second) != 3) {
1372                 memset(cfg, 0, sizeof(*cfg));
1373                 PMD_DRV_LOG(ERR, "Format error! Example: "
1374                                 "%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1375                 return -EINVAL;
1376         }
1377
1378         /*
1379          * If the message validation function has been enabled, 'period'
1380          * and 'ignore_second' must be greater than 0.
1381          */
1382         if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1383                 memset(cfg, 0, sizeof(*cfg));
1384                 PMD_DRV_LOG(ERR, "%s error! The second and third"
1385                                 " numbers must be greater than 0!",
1386                                 ETH_I40E_VF_MSG_CFG);
1387                 return -EINVAL;
1388         }
1389
1390         return 0;
1391 }
1392
1393 static int
1394 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1395                 struct i40e_vf_msg_cfg *msg_cfg)
1396 {
1397         struct rte_kvargs *kvlist;
1398         int kvargs_count;
1399         int ret = 0;
1400
1401         memset(msg_cfg, 0, sizeof(*msg_cfg));
1402
1403         if (!dev->device->devargs)
1404                 return ret;
1405
1406         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1407         if (!kvlist)
1408                 return -EINVAL;
1409
1410         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1411         if (!kvargs_count)
1412                 goto free_end;
1413
1414         if (kvargs_count > 1) {
1415                 PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1416                                 ETH_I40E_VF_MSG_CFG);
1417                 ret = -EINVAL;
1418                 goto free_end;
1419         }
1420
1421         if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1422                         read_vf_msg_config, msg_cfg) < 0)
1423                 ret = -EINVAL;
1424
1425 free_end:
1426         rte_kvargs_free(kvlist);
1427         return ret;
1428 }
1429
1430 #define I40E_ALARM_INTERVAL 50000 /* us */
1431
1432 static int
1433 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1434 {
1435         struct rte_pci_device *pci_dev;
1436         struct rte_intr_handle *intr_handle;
1437         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1439         struct i40e_vsi *vsi;
1440         int ret;
1441         uint32_t len, val;
1442         uint8_t aq_fail = 0;
1443
1444         PMD_INIT_FUNC_TRACE();
1445
1446         dev->dev_ops = &i40e_eth_dev_ops;
1447         dev->rx_queue_count = i40e_dev_rx_queue_count;
1448         dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
1449         dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1450         dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1451         dev->rx_pkt_burst = i40e_recv_pkts;
1452         dev->tx_pkt_burst = i40e_xmit_pkts;
1453         dev->tx_pkt_prepare = i40e_prep_pkts;
1454
1455         /* For secondary processes, we don't initialise any further as the
1456          * primary has already done this work. Just check whether a
1457          * different Rx/Tx function is needed. */
1458         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1459                 i40e_set_rx_function(dev);
1460                 i40e_set_tx_function(dev);
1461                 return 0;
1462         }
1463         i40e_set_default_ptype_table(dev);
1464         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1465         intr_handle = &pci_dev->intr_handle;
1466
1467         rte_eth_copy_pci_info(dev, pci_dev);
1468
1469         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1470         pf->adapter->eth_dev = dev;
1471         pf->dev_data = dev->data;
1472
1473         hw->back = I40E_PF_TO_ADAPTER(pf);
1474         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1475         if (!hw->hw_addr) {
1476                 PMD_INIT_LOG(ERR,
1477                         "Hardware is not available, as address is NULL");
1478                 return -ENODEV;
1479         }
1480
1481         hw->vendor_id = pci_dev->id.vendor_id;
1482         hw->device_id = pci_dev->id.device_id;
1483         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1484         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1485         hw->bus.device = pci_dev->addr.devid;
1486         hw->bus.func = pci_dev->addr.function;
1487         hw->adapter_stopped = 0;
1488         hw->adapter_closed = 0;
1489
1490         /* Init switch device pointer */
1491         hw->switch_dev = NULL;
1492
1493         /*
1494          * Switch Tag value should not be identical to either the First Tag
1495          * or Second Tag values. So set something other than common Ethertype
1496          * for internal switching.
1497          */
1498         hw->switch_tag = 0xffff;
1499
1500         val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1501         if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1502                 PMD_INIT_LOG(ERR, "\nERROR: "
1503                         "Firmware recovery mode detected. Limiting functionality.\n"
1504                         "Refer to the Intel(R) Ethernet Adapters and Devices "
1505                         "User Guide for details on firmware recovery mode.");
1506                 return -EIO;
1507         }
1508
1509         i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1510         /* Check whether multi-driver support is needed */
1511         i40e_support_multi_driver(dev);
1512         /* Check if users want the latest supported vec path */
1513         i40e_use_latest_vec(dev);
1514
1515         /* Make sure all is clean before doing PF reset */
1516         i40e_clear_hw(hw);
1517
1518         /* Reset here to make sure all is clean for each PF */
1519         ret = i40e_pf_reset(hw);
1520         if (ret) {
1521                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1522                 return ret;
1523         }
1524
1525         /* Initialize the shared code (base driver) */
1526         ret = i40e_init_shared_code(hw);
1527         if (ret) {
1528                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1529                 return ret;
1530         }
1531
1532         /* Initialize the parameters for adminq */
1533         i40e_init_adminq_parameter(hw);
1534         ret = i40e_init_adminq(hw);
1535         if (ret != I40E_SUCCESS) {
1536                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1537                 return -EIO;
1538         }
1539         /* Firmware of SFP X722 does not support the 802.1ad adminq option */
1540         if (hw->device_id == I40E_DEV_ID_SFP_X722)
1541                 hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1542
1543         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1544                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1545                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1546                      ((hw->nvm.version >> 12) & 0xf),
1547                      ((hw->nvm.version >> 4) & 0xff),
1548                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1549
1550         /* Initialize the hardware */
1551         i40e_hw_init(dev);
1552
1553         i40e_config_automask(pf);
1554
1555         i40e_set_default_pctype_table(dev);
1556
1557         /*
1558          * To work around an NVM issue, initialize the QinQ packet-type
1559          * registers in software.
1560          * This should be removed once the issue is fixed in the NVM.
1561          */
1562         if (!pf->support_multi_driver)
1563                 i40e_GLQF_reg_init(hw);
1564
1565         /* Initialize the input set for filters (hash and fd) to default value */
1566         i40e_filter_input_set_init(pf);
1567
1568         /* initialise the L3_MAP register */
1569         if (!pf->support_multi_driver) {
1570                 ret = i40e_aq_debug_write_global_register(hw,
1571                                                    I40E_GLQF_L3_MAP(40),
1572                                                    0x00000028,  NULL);
1573                 if (ret)
1574                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1575                                      ret);
1576                 PMD_INIT_LOG(DEBUG,
1577                              "Global register 0x%08x is changed with 0x28",
1578                              I40E_GLQF_L3_MAP(40));
1579         }
1580
1581         /* Need the special FW version to support floating VEB */
1582         config_floating_veb(dev);
1583         /* Clear PXE mode */
1584         i40e_clear_pxe_mode(hw);
1585         i40e_dev_sync_phy_type(hw);
1586
1587         /*
1588          * On X710, performance numbers are far below expectations on recent
1589          * firmware versions, and the fix may not be integrated into the next
1590          * firmware release, so a workaround in the software driver is
1591          * needed. It modifies the initial values of 3 internal-only
1592          * registers. Note that the workaround can be removed once this is
1593          * fixed in firmware.
1594          */
1595         i40e_configure_registers(hw);
1596
1597         /* Get hw capabilities */
1598         ret = i40e_get_cap(hw);
1599         if (ret != I40E_SUCCESS) {
1600                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1601                 goto err_get_capabilities;
1602         }
1603
1604         /* Initialize parameters for PF */
1605         ret = i40e_pf_parameter_init(dev);
1606         if (ret != 0) {
1607                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1608                 goto err_parameter_init;
1609         }
1610
1611         /* Initialize the queue management */
1612         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1613         if (ret < 0) {
1614                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1615                 goto err_qp_pool_init;
1616         }
1617         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1618                                 hw->func_caps.num_msix_vectors - 1);
1619         if (ret < 0) {
1620                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1621                 goto err_msix_pool_init;
1622         }
1623
1624         /* Initialize lan hmc */
1625         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1626                                 hw->func_caps.num_rx_qp, 0, 0);
1627         if (ret != I40E_SUCCESS) {
1628                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1629                 goto err_init_lan_hmc;
1630         }
1631
1632         /* Configure lan hmc */
1633         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1634         if (ret != I40E_SUCCESS) {
1635                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1636                 goto err_configure_lan_hmc;
1637         }
1638
1639         /* Get and check the mac address */
1640         i40e_get_mac_addr(hw, hw->mac.addr);
1641         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1642                 PMD_INIT_LOG(ERR, "mac address is not valid");
1643                 ret = -EIO;
1644                 goto err_get_mac_addr;
1645         }
1646         /* Copy the permanent MAC address */
1647         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1648                         (struct rte_ether_addr *)hw->mac.perm_addr);
1649
1650         /* Disable flow control */
1651         hw->fc.requested_mode = I40E_FC_NONE;
1652         i40e_set_fc(hw, &aq_fail, TRUE);
1653
1654         /* Set the global registers with default ether type value */
1655         if (!pf->support_multi_driver) {
1656                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1657                                          RTE_ETHER_TYPE_VLAN);
1658                 if (ret != I40E_SUCCESS) {
1659                         PMD_INIT_LOG(ERR,
1660                                      "Failed to set the default outer "
1661                                      "VLAN ether type");
1662                         goto err_setup_pf_switch;
1663                 }
1664         }
1665
1666         /* PF setup, which includes VSI setup */
1667         ret = i40e_pf_setup(pf);
1668         if (ret) {
1669                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1670                 goto err_setup_pf_switch;
1671         }
1672
1673         vsi = pf->main_vsi;
1674
1675         /* Disable double vlan by default */
1676         i40e_vsi_config_double_vlan(vsi, FALSE);
1677
1678         /* Disable S-TAG identification when floating_veb is disabled */
1679         if (!pf->floating_veb) {
1680                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1681                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1682                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1683                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1684                 }
1685         }
1686
1687         if (!vsi->max_macaddrs)
1688                 len = RTE_ETHER_ADDR_LEN;
1689         else
1690                 len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1691
1692         /* Should be done after the VSI is initialized */
1693         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1694         if (!dev->data->mac_addrs) {
1695                 PMD_INIT_LOG(ERR,
1696                         "Failed to allocate memory for storing MAC address");
                     ret = -ENOMEM;
1697                 goto err_mac_alloc;
1698         }
1699         rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1700                                         &dev->data->mac_addrs[0]);
1701
1702         /* Init dcb to sw mode by default */
1703         ret = i40e_dcb_init_configure(dev, TRUE);
1704         if (ret != I40E_SUCCESS) {
1705                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1706                 pf->flags &= ~I40E_FLAG_DCB;
1707         }
1708         /* Update HW struct after DCB configuration */
1709         i40e_get_cap(hw);
1710
1711         /* initialize pf host driver to setup SRIOV resource if applicable */
1712         i40e_pf_host_init(dev);
1713
1714         /* register callback func to eal lib */
1715         rte_intr_callback_register(intr_handle,
1716                                    i40e_dev_interrupt_handler, dev);
1717
1718         /* configure and enable device interrupt */
1719         i40e_pf_config_irq0(hw, TRUE);
1720         i40e_pf_enable_irq0(hw);
1721
1722         /* enable uio intr after callback register */
1723         rte_intr_enable(intr_handle);
1724
1725         /* By default disable flexible payload in global configuration */
1726         if (!pf->support_multi_driver)
1727                 i40e_flex_payload_reg_set_default(hw);
1728
1729         /*
1730          * Add an ethertype filter to drop all flow control frames transmitted
1731          * from VSIs. By doing so, we stop VFs from sending PAUSE or PFC
1732          * frames to the wire.
1733          */
1734         i40e_add_tx_flow_control_drop_filter(pf);
1735
1736         /* Set the max frame size to 0x2600 by default,
1737          * in case other drivers changed the default value.
1738          */
1739         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1740
1741         /* initialize mirror rule list */
1742         TAILQ_INIT(&pf->mirror_list);
1743
1744         /* initialize RSS rule list */
1745         TAILQ_INIT(&pf->rss_config_list);
1746
1747         /* initialize Traffic Manager configuration */
1748         i40e_tm_conf_init(dev);
1749
1750         /* Initialize customized information */
1751         i40e_init_customized_info(pf);
1752
1753         /* Initialize the filter invalidation configuration */
1754         i40e_init_filter_invalidation(pf);
1755
1756         ret = i40e_init_ethtype_filter_list(dev);
1757         if (ret < 0)
1758                 goto err_init_ethtype_filter_list;
1759         ret = i40e_init_tunnel_filter_list(dev);
1760         if (ret < 0)
1761                 goto err_init_tunnel_filter_list;
1762         ret = i40e_init_fdir_filter_list(dev);
1763         if (ret < 0)
1764                 goto err_init_fdir_filter_list;
1765
1766         /* initialize queue region configuration */
1767         i40e_init_queue_region_conf(dev);
1768
1769         /* initialize RSS configuration from rte_flow */
1770         memset(&pf->rss_info, 0,
1771                 sizeof(struct i40e_rte_flow_rss_conf));
1772
1773         /* reset all stats of the device, including pf and main vsi */
1774         i40e_dev_stats_reset(dev);
1775
1776         return 0;
1777
1778 err_init_fdir_filter_list:
1779         rte_free(pf->tunnel.hash_table);
1780         rte_free(pf->tunnel.hash_map);
1781 err_init_tunnel_filter_list:
1782         rte_free(pf->ethertype.hash_table);
1783         rte_free(pf->ethertype.hash_map);
1784 err_init_ethtype_filter_list:
1785         rte_free(dev->data->mac_addrs);
1786         dev->data->mac_addrs = NULL;
1787 err_mac_alloc:
1788         i40e_vsi_release(pf->main_vsi);
1789 err_setup_pf_switch:
1790 err_get_mac_addr:
1791 err_configure_lan_hmc:
1792         (void)i40e_shutdown_lan_hmc(hw);
1793 err_init_lan_hmc:
1794         i40e_res_pool_destroy(&pf->msix_pool);
1795 err_msix_pool_init:
1796         i40e_res_pool_destroy(&pf->qp_pool);
1797 err_qp_pool_init:
1798 err_parameter_init:
1799 err_get_capabilities:
1800         (void)i40e_shutdown_adminq(hw);
1801
1802         return ret;
1803 }
1804
1805 static void
1806 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1807 {
1808         struct i40e_ethertype_filter *p_ethertype;
1809         struct i40e_ethertype_rule *ethertype_rule;
1810
1811         ethertype_rule = &pf->ethertype;
1812         /* Remove all ethertype filter rules and hash */
1813         if (ethertype_rule->hash_map)
1814                 rte_free(ethertype_rule->hash_map);
1815         if (ethertype_rule->hash_table)
1816                 rte_hash_free(ethertype_rule->hash_table);
1817
1818         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1819                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1820                              p_ethertype, rules);
1821                 rte_free(p_ethertype);
1822         }
1823 }
1824
1825 static void
1826 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1827 {
1828         struct i40e_tunnel_filter *p_tunnel;
1829         struct i40e_tunnel_rule *tunnel_rule;
1830
1831         tunnel_rule = &pf->tunnel;
1832         /* Remove all tunnel director rules and hash */
1833         if (tunnel_rule->hash_map)
1834                 rte_free(tunnel_rule->hash_map);
1835         if (tunnel_rule->hash_table)
1836                 rte_hash_free(tunnel_rule->hash_table);
1837
1838         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1839                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1840                 rte_free(p_tunnel);
1841         }
1842 }
1843
1844 static void
1845 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1846 {
1847         struct i40e_fdir_filter *p_fdir;
1848         struct i40e_fdir_info *fdir_info;
1849
1850         fdir_info = &pf->fdir;
1851
1852         /* Remove all flow director rules */
1853         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1854                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
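         /*
          * Unlike the ethertype/tunnel lists above, the nodes are not
          * rte_free()'d here: fdir filter entries are backed by the
          * preallocated fdir_filter_array, which is released separately in
          * i40e_fdir_memory_cleanup().
          */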
1855 }
1856
1857 static void
1858 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1859 {
1860         struct i40e_fdir_info *fdir_info;
1861
1862         fdir_info = &pf->fdir;
1863
1864         /* flow director memory cleanup */
1865         if (fdir_info->hash_map)
1866                 rte_free(fdir_info->hash_map);
1867         if (fdir_info->hash_table)
1868                 rte_hash_free(fdir_info->hash_table);
1869         if (fdir_info->fdir_flow_pool.bitmap)
1870                 rte_free(fdir_info->fdir_flow_pool.bitmap);
1871         if (fdir_info->fdir_flow_pool.pool)
1872                 rte_free(fdir_info->fdir_flow_pool.pool);
1873         if (fdir_info->fdir_filter_array)
1874                 rte_free(fdir_info->fdir_filter_array);
1875 }
1876
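/*
 * GLQF_ORT(33)-(35) are global registers shared between PFs, which is
 * presumably why eth_i40e_dev_init() only resets them here when
 * support_multi_driver is off.
 */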
1877 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1878 {
1879         /*
1880          * Disable by default flexible payload
1881          * for corresponding L2/L3/L4 layers.
1882          */
1883         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1884         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1885         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1886 }
1887
1888 static int
1889 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1890 {
1891         struct i40e_hw *hw;
1892
1893         PMD_INIT_FUNC_TRACE();
1894
1895         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1896                 return 0;
1897
1898         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1899
1900         if (hw->adapter_closed == 0)
1901                 i40e_dev_close(dev);
1902
1903         return 0;
1904 }
1905
1906 static int
1907 i40e_dev_configure(struct rte_eth_dev *dev)
1908 {
1909         struct i40e_adapter *ad =
1910                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1911         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1912         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1913         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1914         int i, ret;
1915
1916         ret = i40e_dev_sync_phy_type(hw);
1917         if (ret)
1918                 return ret;
1919
1920         /* Initialize to TRUE. If any Rx queue doesn't meet the
1921          * bulk allocation or vector Rx preconditions, we will reset it.
1922          */
1923         ad->rx_bulk_alloc_allowed = true;
1924         ad->rx_vec_allowed = true;
1925         ad->tx_simple_allowed = true;
1926         ad->tx_vec_allowed = true;
1927
1928         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1929                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1930
1931         /* Only legacy filter API needs the following fdir config. So when the
1932          * legacy filter API is deprecated, the following codes should also be
1933          * removed.
1934          */
1935         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1936                 ret = i40e_fdir_setup(pf);
1937                 if (ret != I40E_SUCCESS) {
1938                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1939                         return -ENOTSUP;
1940                 }
1941                 ret = i40e_fdir_configure(dev);
1942                 if (ret < 0) {
1943                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1944                         goto err;
1945                 }
1946         } else
1947                 i40e_fdir_teardown(pf);
1948
1949         ret = i40e_dev_init_vlan(dev);
1950         if (ret < 0)
1951                 goto err;
1952
1953         /* VMDQ setup.
1954          *  The general PMD call sequence is NIC init, configure,
1955          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will try to
1956          *  look up the VSI that a specific queue belongs to if VMDQ is
1957          *  applicable, so the VMDQ setting has to be done before
1958          *  rx/tx_queue_setup(); this function is a good place for vmdq_setup.
1959          *  RSS setup needs the actual number of configured Rx queues, which
1960          *  is only available after rx_queue_setup(), so dev_start() is a
1961          *  good place for the RSS setup.
1962          */
1963         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1964                 ret = i40e_vmdq_setup(dev);
1965                 if (ret)
1966                         goto err;
1967         }
1968
1969         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1970                 ret = i40e_dcb_setup(dev);
1971                 if (ret) {
1972                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1973                         goto err_dcb;
1974                 }
1975         }
1976
1977         TAILQ_INIT(&pf->flow_list);
1978
1979         return 0;
1980
1981 err_dcb:
1982         /* need to release vmdq resource if exists */
1983         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1984                 i40e_vsi_release(pf->vmdq[i].vsi);
1985                 pf->vmdq[i].vsi = NULL;
1986         }
1987         rte_free(pf->vmdq);
1988         pf->vmdq = NULL;
1989 err:
1990         /* Need to release fdir resource if exists.
1991          * Only legacy filter API needs the following fdir config. So when the
1992          * legacy filter API is deprecated, the following code should also be
1993          * removed.
1994          */
1995         i40e_fdir_teardown(pf);
1996         return ret;
1997 }
1998
1999 void
2000 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
2001 {
2002         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2003         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2004         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2005         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2006         uint16_t msix_vect = vsi->msix_intr;
2007         uint16_t i;
2008
2009         for (i = 0; i < vsi->nb_qps; i++) {
2010                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2011                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2012                 rte_wmb();
2013         }
2014
2015         if (vsi->type != I40E_VSI_SRIOV) {
2016                 if (!rte_intr_allow_others(intr_handle)) {
2017                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2018                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
2019                         I40E_WRITE_REG(hw,
2020                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2021                                        0);
2022                 } else {
2023                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2024                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
2025                         I40E_WRITE_REG(hw,
2026                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2027                                                        msix_vect - 1), 0);
2028                 }
2029         } else {
2030                 uint32_t reg;
2031                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2032                         vsi->user_param + (msix_vect - 1);
2033
2034                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2035                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2036         }
2037         I40E_WRITE_FLUSH(hw);
2038 }
2039
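/*
 * Chain 'nb_queue' Rx queues starting at 'base_queue' onto one MSI-X
 * vector: each QINT_RQCTL's NEXTQ_INDX points at the following queue and
 * the last entry terminates the list, while PFINT_LNKLST0/LNKLSTN (or the
 * VPINT_* equivalents for an SR-IOV VSI) is written with the head of the
 * list plus a default ITR interval.
 */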
2040 static void
2041 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2042                        int base_queue, int nb_queue,
2043                        uint16_t itr_idx)
2044 {
2045         int i;
2046         uint32_t val;
2047         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2048         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2049
2050         /* Bind all RX queues to allocated MSIX interrupt */
2051         for (i = 0; i < nb_queue; i++) {
2052                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2053                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2054                         ((base_queue + i + 1) <<
2055                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2056                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2057                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2058
2059                 if (i == nb_queue - 1)
2060                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2061                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2062         }
2063
2064         /* Write first RX queue to Link list register as the head element */
2065         if (vsi->type != I40E_VSI_SRIOV) {
2066                 uint16_t interval =
2067                         i40e_calc_itr_interval(1, pf->support_multi_driver);
2068
2069                 if (msix_vect == I40E_MISC_VEC_ID) {
2070                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2071                                        (base_queue <<
2072                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2073                                        (0x0 <<
2074                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2075                         I40E_WRITE_REG(hw,
2076                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2077                                        interval);
2078                 } else {
2079                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2080                                        (base_queue <<
2081                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2082                                        (0x0 <<
2083                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2084                         I40E_WRITE_REG(hw,
2085                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2086                                                        msix_vect - 1),
2087                                        interval);
2088                 }
2089         } else {
2090                 uint32_t reg;
2091
2092                 if (msix_vect == I40E_MISC_VEC_ID) {
2093                         I40E_WRITE_REG(hw,
2094                                        I40E_VPINT_LNKLST0(vsi->user_param),
2095                                        (base_queue <<
2096                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2097                                        (0x0 <<
2098                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2099                 } else {
2100                         /* num_msix_vectors_vf needs to exclude irq0 */
2101                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2102                                 vsi->user_param + (msix_vect - 1);
2103
2104                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2105                                        (base_queue <<
2106                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2107                                        (0x0 <<
2108                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2109                 }
2110         }
2111
2112         I40E_WRITE_FLUSH(hw);
2113 }
2114
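/*
 * Bind the VSI's queues to MSI-X vectors. An SR-IOV VSI maps all of its
 * queues onto its single vector. PF/VMDq VSIs get a 1:1 queue-to-vector
 * mapping while vectors last; the remaining queues are chained onto the
 * last vector (or onto I40E_MISC_VEC_ID when "other" interrupts are not
 * allowed), and intr_handle->intr_vec[] records the resulting mapping.
 */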
2115 int
2116 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2117 {
2118         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2119         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2120         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2121         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2122         uint16_t msix_vect = vsi->msix_intr;
2123         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2124         uint16_t queue_idx = 0;
2125         int record = 0;
2126         int i;
2127
2128         for (i = 0; i < vsi->nb_qps; i++) {
2129                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2130                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2131         }
2132
2133         /* VF bind interrupt */
2134         if (vsi->type == I40E_VSI_SRIOV) {
2135                 if (vsi->nb_msix == 0) {
2136                         PMD_DRV_LOG(ERR, "No msix resource");
2137                         return -EINVAL;
2138                 }
2139                 __vsi_queues_bind_intr(vsi, msix_vect,
2140                                        vsi->base_queue, vsi->nb_qps,
2141                                        itr_idx);
2142                 return 0;
2143         }
2144
2145         /* PF & VMDq bind interrupt */
2146         if (rte_intr_dp_is_en(intr_handle)) {
2147                 if (vsi->type == I40E_VSI_MAIN) {
2148                         queue_idx = 0;
2149                         record = 1;
2150                 } else if (vsi->type == I40E_VSI_VMDQ2) {
2151                         struct i40e_vsi *main_vsi =
2152                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2153                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
2154                         record = 1;
2155                 }
2156         }
2157
2158         for (i = 0; i < vsi->nb_used_qps; i++) {
2159                 if (vsi->nb_msix == 0) {
2160                         PMD_DRV_LOG(ERR, "No msix resource");
2161                         return -EINVAL;
2162                 } else if (nb_msix <= 1) {
2163                         if (!rte_intr_allow_others(intr_handle))
2164                                 /* allow sharing of MISC_VEC_ID */
2165                                 msix_vect = I40E_MISC_VEC_ID;
2166
2167                         /* not enough msix vectors, map all remaining queues to one */
2168                         __vsi_queues_bind_intr(vsi, msix_vect,
2169                                                vsi->base_queue + i,
2170                                                vsi->nb_used_qps - i,
2171                                                itr_idx);
2172                         for (; !!record && i < vsi->nb_used_qps; i++)
2173                                 intr_handle->intr_vec[queue_idx + i] =
2174                                         msix_vect;
2175                         break;
2176                 }
2177                 /* 1:1 queue/msix_vect mapping */
2178                 __vsi_queues_bind_intr(vsi, msix_vect,
2179                                        vsi->base_queue + i, 1,
2180                                        itr_idx);
2181                 if (!!record)
2182                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
2183
2184                 msix_vect++;
2185                 nb_msix--;
2186         }
2187
2188         return 0;
2189 }
2190
2191 void
2192 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2193 {
2194         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2195         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2196         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2197         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2198         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2199         uint16_t msix_intr, i;
2200
2201         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2202                 for (i = 0; i < vsi->nb_msix; i++) {
2203                         msix_intr = vsi->msix_intr + i;
2204                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2205                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
2206                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2207                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2208                 }
2209         else
2210                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2211                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
2212                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2213                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2214
2215         I40E_WRITE_FLUSH(hw);
2216 }
2217
2218 void
2219 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2220 {
2221         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2222         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2223         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2224         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2225         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2226         uint16_t msix_intr, i;
2227
2228         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2229                 for (i = 0; i < vsi->nb_msix; i++) {
2230                         msix_intr = vsi->msix_intr + i;
2231                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2232                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2233                 }
2234         else
2235                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2236                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2237
2238         I40E_WRITE_FLUSH(hw);
2239 }
2240
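/*
 * Translate the ethdev ETH_LINK_SPEED_* bitmap into the firmware's
 * I40E_LINK_SPEED_* bitmap; e.g. (ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G)
 * becomes (I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB), and an empty
 * input yields I40E_LINK_SPEED_UNKNOWN.
 */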
2241 static inline uint8_t
2242 i40e_parse_link_speeds(uint16_t link_speeds)
2243 {
2244         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2245
2246         if (link_speeds & ETH_LINK_SPEED_40G)
2247                 link_speed |= I40E_LINK_SPEED_40GB;
2248         if (link_speeds & ETH_LINK_SPEED_25G)
2249                 link_speed |= I40E_LINK_SPEED_25GB;
2250         if (link_speeds & ETH_LINK_SPEED_20G)
2251                 link_speed |= I40E_LINK_SPEED_20GB;
2252         if (link_speeds & ETH_LINK_SPEED_10G)
2253                 link_speed |= I40E_LINK_SPEED_10GB;
2254         if (link_speeds & ETH_LINK_SPEED_1G)
2255                 link_speed |= I40E_LINK_SPEED_1GB;
2256         if (link_speeds & ETH_LINK_SPEED_100M)
2257                 link_speed |= I40E_LINK_SPEED_100MB;
2258
2259         return link_speed;
2260 }
2261
2262 static int
2263 i40e_phy_conf_link(struct i40e_hw *hw,
2264                    uint8_t abilities,
2265                    uint8_t force_speed,
2266                    bool is_up)
2267 {
2268         enum i40e_status_code status;
2269         struct i40e_aq_get_phy_abilities_resp phy_ab;
2270         struct i40e_aq_set_phy_config phy_conf;
2271         enum i40e_aq_phy_type cnt;
2272         uint8_t avail_speed;
2273         uint32_t phy_type_mask = 0;
2274
2275         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2276                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2278                         I40E_AQ_PHY_FLAG_LOW_POWER;
2279         int ret = -ENOTSUP;
2280
2281         /* To get phy capabilities of available speeds. */
2282         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2283                                               NULL);
2284         if (status) {
2285                 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2286                                 status);
2287                 return ret;
2288         }
2289         avail_speed = phy_ab.link_speed;
2290
2291         /* To get the current phy config. */
2292         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2293                                               NULL);
2294         if (status) {
2295                 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2296                                 status);
2297                 return ret;
2298         }
2299
2300         /* If the link needs to go up, it is in autoneg mode, and a speed
2301          * is already configured, there is no need to set it up again.
2302          */
2303         if (is_up && phy_ab.phy_type != 0 &&
2304                      abilities & I40E_AQ_PHY_AN_ENABLED &&
2305                      phy_ab.link_speed != 0)
2306                 return I40E_SUCCESS;
2307
2308         memset(&phy_conf, 0, sizeof(phy_conf));
2309
2310         /* bits 0-2 use the values from get_phy_abilities_resp */
2311         abilities &= ~mask;
2312         abilities |= phy_ab.abilities & mask;
2313
2314         phy_conf.abilities = abilities;
2315
2316         /* If the link needs to go up, but the forced speed is not supported,
2317          * warn users and configure the default available speeds.
2318          */
2319         if (is_up && !(force_speed & avail_speed)) {
2320                 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2321                 phy_conf.link_speed = avail_speed;
2322         } else {
2323                 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2324         }
2325
2326         /* PHY type mask needs to include each type except PHY type extension */
2327         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2328                 phy_type_mask |= 1 << cnt;
2329
2330         /* use get_phy_abilities_resp value for the rest */
2331         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2332         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2333                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2334                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2335         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2336         phy_conf.eee_capability = phy_ab.eee_capability;
2337         phy_conf.eeer = phy_ab.eeer_val;
2338         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2339
2340         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2341                     phy_ab.abilities, phy_ab.link_speed);
2342         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2343                     phy_conf.abilities, phy_conf.link_speed);
2344
2345         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2346         if (status)
2347                 return ret;
2348
2349         return I40E_SUCCESS;
2350 }
2351
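/*
 * ETH_LINK_SPEED_AUTONEG (0) is expanded below to every speed this PMD
 * can advertise, with autonegotiation enabled; an explicit speed list
 * disables autonegotiation and forces only the requested speeds.
 */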
2352 static int
2353 i40e_apply_link_speed(struct rte_eth_dev *dev)
2354 {
2355         uint8_t speed;
2356         uint8_t abilities = 0;
2357         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2358         struct rte_eth_conf *conf = &dev->data->dev_conf;
2359
2360         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2361                      I40E_AQ_PHY_LINK_ENABLED;
2362
2363         if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2364                 conf->link_speeds = ETH_LINK_SPEED_40G |
2365                                     ETH_LINK_SPEED_25G |
2366                                     ETH_LINK_SPEED_20G |
2367                                     ETH_LINK_SPEED_10G |
2368                                     ETH_LINK_SPEED_1G |
2369                                     ETH_LINK_SPEED_100M;
2370
2371                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2372         } else {
2373                 abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2374         }
2375         speed = i40e_parse_link_speeds(conf->link_speeds);
2376
2377         return i40e_phy_conf_link(hw, abilities, speed, true);
2378 }
2379
2380 static int
2381 i40e_dev_start(struct rte_eth_dev *dev)
2382 {
2383         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2384         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2385         struct i40e_vsi *main_vsi = pf->main_vsi;
2386         int ret, i;
2387         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2388         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2389         uint32_t intr_vector = 0;
2390         struct i40e_vsi *vsi;
2391         uint16_t nb_rxq, nb_txq;
2392
2393         hw->adapter_stopped = 0;
2394
2395         rte_intr_disable(intr_handle);
2396
2397         if ((rte_intr_cap_multiple(intr_handle) ||
2398              !RTE_ETH_DEV_SRIOV(dev).active) &&
2399             dev->data->dev_conf.intr_conf.rxq != 0) {
2400                 intr_vector = dev->data->nb_rx_queues;
2401                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2402                 if (ret)
2403                         return ret;
2404         }
2405
2406         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2407                 intr_handle->intr_vec =
2408                         rte_zmalloc("intr_vec",
2409                                     dev->data->nb_rx_queues * sizeof(int),
2410                                     0);
2411                 if (!intr_handle->intr_vec) {
2412                         PMD_INIT_LOG(ERR,
2413                                 "Failed to allocate %d rx_queues intr_vec",
2414                                 dev->data->nb_rx_queues);
2415                         return -ENOMEM;
2416                 }
2417         }
2418
2419         /* Initialize VSI */
2420         ret = i40e_dev_rxtx_init(pf);
2421         if (ret != I40E_SUCCESS) {
2422                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2423                 return ret;
2424         }
2425
2426         /* Map queues with MSIX interrupt */
2427         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2428                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2429         ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2430         if (ret < 0)
2431                 return ret;
2432         i40e_vsi_enable_queues_intr(main_vsi);
2433
2434         /* Map VMDQ VSI queues with MSIX interrupt */
2435         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2436                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2437                 ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2438                                                 I40E_ITR_INDEX_DEFAULT);
2439                 if (ret < 0)
2440                         return ret;
2441                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2442         }
2443
2444         /* Enable all queues which have been configured */
2445         for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2446                 ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2447                 if (ret)
2448                         goto rx_err;
2449         }
2450
2451         for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2452                 ret = i40e_dev_tx_queue_start(dev, nb_txq);
2453                 if (ret)
2454                         goto tx_err;
2455         }
2456
2457         /* Enable receiving broadcast packets */
2458         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2459         if (ret != I40E_SUCCESS)
2460                 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
2461
2462         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2463                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2464                                                 true, NULL);
2465                 if (ret != I40E_SUCCESS)
2466                         PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
2467         }
2468
2469         /* Enable the VLAN promiscuous mode. */
2470         if (pf->vfs) {
2471                 for (i = 0; i < pf->vf_num; i++) {
2472                         vsi = pf->vfs[i].vsi;
2473                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2474                                                      true, NULL);
2475                 }
2476         }
2477
2478         /* Enable mac loopback mode */
2479         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2480             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2481                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2482                 if (ret != I40E_SUCCESS) {
2483                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2484                         goto tx_err;
2485                 }
2486         }
2487
2488         /* Apply link configure */
2489         ret = i40e_apply_link_speed(dev);
2490         if (I40E_SUCCESS != ret) {
2491                 PMD_DRV_LOG(ERR, "Failed to apply link settings");
2492                 goto tx_err;
2493         }
2494
2495         if (!rte_intr_allow_others(intr_handle)) {
2496                 rte_intr_callback_unregister(intr_handle,
2497                                              i40e_dev_interrupt_handler,
2498                                              (void *)dev);
2499                 /* configure and enable device interrupt */
2500                 i40e_pf_config_irq0(hw, FALSE);
2501                 i40e_pf_enable_irq0(hw);
2502
2503                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2504                         PMD_INIT_LOG(INFO,
2505                                 "lsc won't be enabled: interrupt multiplexing is unavailable");
2506         } else {
2507                 ret = i40e_aq_set_phy_int_mask(hw,
2508                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2509                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2510                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2511                 if (ret != I40E_SUCCESS)
2512                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2513
2514                 /* Call the get_link_info aq command to enable/disable LSE */
2515                 i40e_dev_link_update(dev, 0);
2516         }
2517
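         /*
          * With Rx-queue interrupts disabled, device events are handled by a
          * periodic EAL alarm (I40E_ALARM_INTERVAL, i.e. every 50 ms) instead
          * of the device interrupt; i40e_dev_stop() cancels this alarm.
          */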
2518         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2519                 rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2520                                   i40e_dev_alarm_handler, dev);
2521         } else {
2522                 /* enable uio intr after callback register */
2523                 rte_intr_enable(intr_handle);
2524         }
2525
2526         i40e_filter_restore(pf);
2527
2528         if (pf->tm_conf.root && !pf->tm_conf.committed)
2529                 PMD_DRV_LOG(WARNING,
2530                             "please call hierarchy_commit() "
2531                             "before starting the port");
2532
2533         return I40E_SUCCESS;
2534
2535 tx_err:
2536         for (i = 0; i < nb_txq; i++)
2537                 i40e_dev_tx_queue_stop(dev, i);
2538 rx_err:
2539         for (i = 0; i < nb_rxq; i++)
2540                 i40e_dev_rx_queue_stop(dev, i);
2541
2542         return ret;
2543 }
2544
2545 static void
2546 i40e_dev_stop(struct rte_eth_dev *dev)
2547 {
2548         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2549         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2550         struct i40e_vsi *main_vsi = pf->main_vsi;
2551         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2552         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2553         int i;
2554
2555         if (hw->adapter_stopped == 1)
2556                 return;
2557
2558         if (dev->data->dev_conf.intr_conf.rxq == 0) {
2559                 rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2560                 rte_intr_enable(intr_handle);
2561         }
2562
2563         /* Disable all queues */
2564         for (i = 0; i < dev->data->nb_tx_queues; i++)
2565                 i40e_dev_tx_queue_stop(dev, i);
2566
2567         for (i = 0; i < dev->data->nb_rx_queues; i++)
2568                 i40e_dev_rx_queue_stop(dev, i);
2569
2570         /* unmap queues from interrupt registers */
2571         i40e_vsi_disable_queues_intr(main_vsi);
2572         i40e_vsi_queues_unbind_intr(main_vsi);
2573
2574         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2575                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2576                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2577         }
2578
2579         /* Clear all queues and release memory */
2580         i40e_dev_clear_queues(dev);
2581
2582         /* Set link down */
2583         i40e_dev_set_link_down(dev);
2584
2585         if (!rte_intr_allow_others(intr_handle))
2586                 /* restore the default interrupt handler */
2587                 rte_intr_callback_register(intr_handle,
2588                                            i40e_dev_interrupt_handler,
2589                                            (void *)dev);
2590
2591         /* Clean datapath event and queue/vec mapping */
2592         rte_intr_efd_disable(intr_handle);
2593         if (intr_handle->intr_vec) {
2594                 rte_free(intr_handle->intr_vec);
2595                 intr_handle->intr_vec = NULL;
2596         }
2597
2598         /* reset hierarchy commit */
2599         pf->tm_conf.committed = false;
2600
2601         hw->adapter_stopped = 1;
2602         dev->data->dev_started = 0;
2603
2604         pf->adapter->rss_reta_updated = 0;
2605 }
2606
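/*
 * Close the device: stop it first, then release every software and
 * hardware resource owned by the port - mirror rules, queues, FDIR
 * configuration, the LAN HMC, VSIs/VEBs, the admin queue and the filter
 * lists - and trigger a PF software reset so no stale configuration is
 * left in hardware.
 */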
2607 static int
2608 i40e_dev_close(struct rte_eth_dev *dev)
2609 {
2610         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2611         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2612         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2613         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2614         struct i40e_mirror_rule *p_mirror;
2615         struct i40e_filter_control_settings settings;
2616         struct rte_flow *p_flow;
2617         uint32_t reg;
2618         int i;
2619         int ret;
2620         uint8_t aq_fail = 0;
2621         int retries = 0;
2622
2623         PMD_INIT_FUNC_TRACE();
2624         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2625                 return 0;
2626
2627         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2628         if (ret)
2629                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2630
2631
2632         i40e_dev_stop(dev);
2633
2634         /* Remove all mirror rules */
2635         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2636                 ret = i40e_aq_del_mirror_rule(hw,
2637                                               pf->main_vsi->veb->seid,
2638                                               p_mirror->rule_type,
2639                                               p_mirror->entries,
2640                                               p_mirror->num_entries,
2641                                               p_mirror->id);
2642                 if (ret < 0)
2643                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2644                                     "status = %d, aq_err = %d.", ret,
2645                                     hw->aq.asq_last_status);
2646
2647                 /* remove the mirror rule's software resources even if the AQ delete failed */
2648                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2649                 rte_free(p_mirror);
2650                 pf->nb_mirror_rule--;
2651         }
2652
2653         i40e_dev_free_queues(dev);
2654
2655         /* Disable interrupt */
2656         i40e_pf_disable_irq0(hw);
2657         rte_intr_disable(intr_handle);
2658
2659         /*
2660          * Only legacy filter API needs the following fdir config. So when the
2661          * legacy filter API is deprecated, the following code should also be
2662          * removed.
2663          */
2664         i40e_fdir_teardown(pf);
2665
2666         /* shutdown and destroy the HMC */
2667         i40e_shutdown_lan_hmc(hw);
2668
2669         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2670                 i40e_vsi_release(pf->vmdq[i].vsi);
2671                 pf->vmdq[i].vsi = NULL;
2672         }
2673         rte_free(pf->vmdq);
2674         pf->vmdq = NULL;
2675
2676         /* release all the existing VSIs and VEBs */
2677         i40e_vsi_release(pf->main_vsi);
2678
2679         /* shutdown the adminq */
2680         i40e_aq_queue_shutdown(hw, true);
2681         i40e_shutdown_adminq(hw);
2682
2683         i40e_res_pool_destroy(&pf->qp_pool);
2684         i40e_res_pool_destroy(&pf->msix_pool);
2685
2686         /* Disable flexible payload in global configuration */
2687         if (!pf->support_multi_driver)
2688                 i40e_flex_payload_reg_set_default(hw);
2689
2690         /* force a PF reset to clean up anything left over */
2691         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2692         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2693                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2694         I40E_WRITE_FLUSH(hw);
2695
2696         /* Clear PXE mode */
2697         i40e_clear_pxe_mode(hw);
2698
2699         /* Unconfigure filter control */
2700         memset(&settings, 0, sizeof(settings));
2701         ret = i40e_set_filter_control(hw, &settings);
2702         if (ret)
2703                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2704                                         ret);
2705
2706         /* Disable flow control */
2707         hw->fc.requested_mode = I40E_FC_NONE;
2708         i40e_set_fc(hw, &aq_fail, TRUE);
2709
2710         /* uninitialize pf host driver */
2711         i40e_pf_host_uninit(dev);
2712
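        /*
         * The interrupt callback may still be running; retry the unregister
         * a few times, 500 ms apart. -ENOENT means it was already removed.
         */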
2713         do {
2714                 ret = rte_intr_callback_unregister(intr_handle,
2715                                 i40e_dev_interrupt_handler, dev);
2716                 if (ret >= 0 || ret == -ENOENT) {
2717                         break;
2718                 } else if (ret != -EAGAIN) {
2719                         PMD_INIT_LOG(ERR,
2720                                  "intr callback unregister failed: %d",
2721                                  ret);
2722                 }
2723                 i40e_msec_delay(500);
2724         } while (retries++ < 5);
2725
2726         i40e_rm_ethtype_filter_list(pf);
2727         i40e_rm_tunnel_filter_list(pf);
2728         i40e_rm_fdir_filter_list(pf);
2729
2730         /* Remove all flows */
2731         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2732                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2733                 /* Do not free FDIR flows since they are statically allocated */
2734                 if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2735                         rte_free(p_flow);
2736         }
2737
2738         /* release the statically allocated FDIR memory */
2739         i40e_fdir_memory_cleanup(pf);
2740
2741         /* Remove all Traffic Manager configuration */
2742         i40e_tm_conf_uninit(dev);
2743
2744         hw->adapter_closed = 1;
2745         return 0;
2746 }
2747
2748 /*
2749  * Reset PF device only to re-initialize resources in PMD layer
2750  */
2751 static int
2752 i40e_dev_reset(struct rte_eth_dev *dev)
2753 {
2754         int ret;
2755
2756         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2757          * all of its VFs so that they stay aligned with it. The notification
2758          * mechanism is PMD specific, and for the i40e PF it is rather complex.
2759          * To avoid unexpected behavior in the VFs, resetting a PF with SR-IOV
2760          * active is currently not supported. It might be supported later.
2761          */
2762         if (dev->data->sriov.active)
2763                 return -ENOTSUP;
2764
2765         ret = eth_i40e_dev_uninit(dev);
2766         if (ret)
2767                 return ret;
2768
2769         ret = eth_i40e_dev_init(dev, NULL);
2770
2771         return ret;
2772 }
2773
2774 static int
2775 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2776 {
2777         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2778         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2779         struct i40e_vsi *vsi = pf->main_vsi;
2780         int status;
2781
2782         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2783                                                      true, NULL, true);
2784         if (status != I40E_SUCCESS) {
2785                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2786                 return -EAGAIN;
2787         }
2788
2789         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2790                                                         TRUE, NULL);
2791         if (status != I40E_SUCCESS) {
2792                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2793                 /* Rollback unicast promiscuous mode */
2794                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2795                                                     false, NULL, true);
2796                 return -EAGAIN;
2797         }
2798
2799         return 0;
2800 }
2801
2802 static int
2803 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2804 {
2805         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2806         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2807         struct i40e_vsi *vsi = pf->main_vsi;
2808         int status;
2809
2810         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2811                                                      false, NULL, true);
2812         if (status != I40E_SUCCESS) {
2813                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2814                 return -EAGAIN;
2815         }
2816
2817         /* multicast promiscuous must stay on while all_multicast is enabled */
2818         if (dev->data->all_multicast == 1)
2819                 return 0;
2820
2821         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2822                                                         false, NULL);
2823         if (status != I40E_SUCCESS) {
2824                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2825                 /* Rollback unicast promiscuous mode */
2826                 i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2827                                                     true, NULL, true);
2828                 return -EAGAIN;
2829         }
2830
2831         return 0;
2832 }
2833
2834 static int
2835 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2836 {
2837         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2838         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2839         struct i40e_vsi *vsi = pf->main_vsi;
2840         int ret;
2841
2842         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2843         if (ret != I40E_SUCCESS) {
2844                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2845                 return -EAGAIN;
2846         }
2847
2848         return 0;
2849 }
2850
2851 static int
2852 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2853 {
2854         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2855         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2856         struct i40e_vsi *vsi = pf->main_vsi;
2857         int ret;
2858
2859         if (dev->data->promiscuous == 1)
2860                 return 0; /* promiscuous mode keeps multicast promiscuous enabled */
2861
2862         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2863                                 vsi->seid, FALSE, NULL);
2864         if (ret != I40E_SUCCESS) {
2865                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2866                 return -EAGAIN;
2867         }
2868
2869         return 0;
2870 }
2871
2872 /*
2873  * Set device link up.
2874  */
2875 static int
2876 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2877 {
2878         /* re-apply link speed setting */
2879         return i40e_apply_link_speed(dev);
2880 }
2881
2882 /*
2883  * Set device link down.
2884  */
2885 static int
2886 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2887 {
2888         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2889         uint8_t abilities = 0;
2890         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2891
2892         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2893         return i40e_phy_conf_link(hw, abilities, speed, false);
2894 }
2895
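/*
 * Fast-path link query: read the MAC link-status register directly instead
 * of going through the admin queue. The speed encoding differs between
 * X722 and the other MAC types, and 25G vs. 40G is disambiguated via the
 * PRTMAC_MACC register.
 */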
2896 static __rte_always_inline void
2897 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2898 {
2899 /* Link status registers and values */
2900 #define I40E_PRTMAC_LINKSTA             0x001E2420
2901 #define I40E_REG_LINK_UP                0x40000080
2902 #define I40E_PRTMAC_MACC                0x001E24E0
2903 #define I40E_REG_MACC_25GB              0x00020000
2904 #define I40E_REG_SPEED_MASK             0x38000000
2905 #define I40E_REG_SPEED_0                0x00000000
2906 #define I40E_REG_SPEED_1                0x08000000
2907 #define I40E_REG_SPEED_2                0x10000000
2908 #define I40E_REG_SPEED_3                0x18000000
2909 #define I40E_REG_SPEED_4                0x20000000
2910         uint32_t link_speed;
2911         uint32_t reg_val;
2912
2913         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2914         link_speed = reg_val & I40E_REG_SPEED_MASK;
2915         reg_val &= I40E_REG_LINK_UP;
2916         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2917
2918         if (unlikely(link->link_status == 0))
2919                 return;
2920
2921         /* Parse the link status */
2922         switch (link_speed) {
2923         case I40E_REG_SPEED_0:
2924                 link->link_speed = ETH_SPEED_NUM_100M;
2925                 break;
2926         case I40E_REG_SPEED_1:
2927                 link->link_speed = ETH_SPEED_NUM_1G;
2928                 break;
2929         case I40E_REG_SPEED_2:
2930                 if (hw->mac.type == I40E_MAC_X722)
2931                         link->link_speed = ETH_SPEED_NUM_2_5G;
2932                 else
2933                         link->link_speed = ETH_SPEED_NUM_10G;
2934                 break;
2935         case I40E_REG_SPEED_3:
2936                 if (hw->mac.type == I40E_MAC_X722) {
2937                         link->link_speed = ETH_SPEED_NUM_5G;
2938                 } else {
2939                         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2940
2941                         if (reg_val & I40E_REG_MACC_25GB)
2942                                 link->link_speed = ETH_SPEED_NUM_25G;
2943                         else
2944                                 link->link_speed = ETH_SPEED_NUM_40G;
2945                 }
2946                 break;
2947         case I40E_REG_SPEED_4:
2948                 if (hw->mac.type == I40E_MAC_X722)
2949                         link->link_speed = ETH_SPEED_NUM_10G;
2950                 else
2951                         link->link_speed = ETH_SPEED_NUM_20G;
2952                 break;
2953         default:
2954                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2955                 break;
2956         }
2957 }
2958
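/*
 * Slow-path link query via the get_link_info admin queue command; when
 * wait_to_complete is set, poll every 100 ms for up to 1 s until the link
 * reports up.
 */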
2959 static __rte_always_inline void
2960 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2961         bool enable_lse, int wait_to_complete)
2962 {
2963 #define CHECK_INTERVAL             100  /* 100ms */
2964 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2965         uint32_t rep_cnt = MAX_REPEAT_TIME;
2966         struct i40e_link_status link_status;
2967         int status;
2968
2969         memset(&link_status, 0, sizeof(link_status));
2970
2971         do {
2972                 memset(&link_status, 0, sizeof(link_status));
2973
2974                 /* Get link status information from hardware */
2975                 status = i40e_aq_get_link_info(hw, enable_lse,
2976                                                 &link_status, NULL);
2977                 if (unlikely(status != I40E_SUCCESS)) {
2978                         link->link_speed = ETH_SPEED_NUM_NONE;
2979                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2980                         PMD_DRV_LOG(ERR, "Failed to get link info");
2981                         return;
2982                 }
2983
2984                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2985                 if (!wait_to_complete || link->link_status)
2986                         break;
2987
2988                 rte_delay_ms(CHECK_INTERVAL);
2989         } while (--rep_cnt);
2990
2991         /* Parse the link status */
2992         switch (link_status.link_speed) {
2993         case I40E_LINK_SPEED_100MB:
2994                 link->link_speed = ETH_SPEED_NUM_100M;
2995                 break;
2996         case I40E_LINK_SPEED_1GB:
2997                 link->link_speed = ETH_SPEED_NUM_1G;
2998                 break;
2999         case I40E_LINK_SPEED_10GB:
3000                 link->link_speed = ETH_SPEED_NUM_10G;
3001                 break;
3002         case I40E_LINK_SPEED_20GB:
3003                 link->link_speed = ETH_SPEED_NUM_20G;
3004                 break;
3005         case I40E_LINK_SPEED_25GB:
3006                 link->link_speed = ETH_SPEED_NUM_25G;
3007                 break;
3008         case I40E_LINK_SPEED_40GB:
3009                 link->link_speed = ETH_SPEED_NUM_40G;
3010                 break;
3011         default:
3012                 if (link->link_status)
3013                         link->link_speed = ETH_SPEED_NUM_UNKNOWN;
3014                 else
3015                         link->link_speed = ETH_SPEED_NUM_NONE;
3016                 break;
3017         }
3018 }
3019
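/*
 * Update the cached link status. The register fast path is used when the
 * caller does not wait and link-state events (LSE) are disabled; otherwise
 * the firmware is queried. If this port backs a switch device
 * (hw->switch_dev), that device's link status is reported instead, and the
 * result is then propagated to the VFs.
 */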
3020 int
3021 i40e_dev_link_update(struct rte_eth_dev *dev,
3022                      int wait_to_complete)
3023 {
3024         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3025         struct rte_eth_link link;
3026         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3027         int ret;
3028
3029         memset(&link, 0, sizeof(link));
3030
3031         /* i40e uses full duplex only */
3032         link.link_duplex = ETH_LINK_FULL_DUPLEX;
3033         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3034                         ETH_LINK_SPEED_FIXED);
3035
3036         if (!wait_to_complete && !enable_lse)
3037                 update_link_reg(hw, &link);
3038         else
3039                 update_link_aq(hw, &link, enable_lse, wait_to_complete);
3040
3041         if (hw->switch_dev)
3042                 rte_eth_linkstatus_get(hw->switch_dev, &link);
3043
3044         ret = rte_eth_linkstatus_set(dev, &link);
3045         i40e_notify_all_vfs_link_status(dev);
3046
3047         return ret;
3048 }
3049
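/*
 * Accumulate a 48-bit hardware counter into a 64-bit software counter:
 * when the new 48-bit reading is below the low 48 bits of the previous
 * value the counter has wrapped, so add 2^48, then carry over the high
 * 16 bits preserved from the previous 64-bit value.
 */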
3050 static void
3051 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3052                           uint32_t loreg, bool offset_loaded, uint64_t *offset,
3053                           uint64_t *stat, uint64_t *prev_stat)
3054 {
3055         i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3056         /* extend the 48-bit counter to 64 bits when it overflows */
3057         if (offset_loaded) {
3058                 if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3059                         *stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3060                 *stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3061         }
3062         *prev_stat = *stat;
3063 }
3064
3065 /* Get all the statistics of a VSI */
3066 void
3067 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3068 {
3069         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3070         struct i40e_eth_stats *nes = &vsi->eth_stats;
3071         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3072         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3073
3074         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3075                                   vsi->offset_loaded, &oes->rx_bytes,
3076                                   &nes->rx_bytes, &vsi->prev_rx_bytes);
3077         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3078                             vsi->offset_loaded, &oes->rx_unicast,
3079                             &nes->rx_unicast);
3080         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3081                             vsi->offset_loaded, &oes->rx_multicast,
3082                             &nes->rx_multicast);
3083         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3084                             vsi->offset_loaded, &oes->rx_broadcast,
3085                             &nes->rx_broadcast);
3086         /* exclude CRC bytes */
3087         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3088                 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3089
3090         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3091                             &oes->rx_discards, &nes->rx_discards);
3092         /* GLV_REPC not supported */
3093         /* GLV_RMPC not supported */
3094         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3095                             &oes->rx_unknown_protocol,
3096                             &nes->rx_unknown_protocol);
3097         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3098                                   vsi->offset_loaded, &oes->tx_bytes,
3099                                   &nes->tx_bytes, &vsi->prev_tx_bytes);
3100         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3101                             vsi->offset_loaded, &oes->tx_unicast,
3102                             &nes->tx_unicast);
3103         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3104                             vsi->offset_loaded, &oes->tx_multicast,
3105                             &nes->tx_multicast);
3106         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3107                             vsi->offset_loaded,  &oes->tx_broadcast,
3108                             &nes->tx_broadcast);
3109         /* GLV_TDPC not supported */
3110         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3111                             &oes->tx_errors, &nes->tx_errors);
3112         vsi->offset_loaded = true;
3113
3114         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3115                     vsi->vsi_id);
3116         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3117         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3118         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3119         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3120         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3121         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3122                     nes->rx_unknown_protocol);
3123         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3124         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3125         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3126         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3127         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3128         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3129         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3130                     vsi->vsi_id);
3131 }
3132
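/*
 * Refresh all port-level (GLPRT_*) counters. Internally switched traffic
 * is counted via the main VSI (GLV_*) registers and subtracted from the
 * port totals; the subtractions clamp at zero because the two register
 * sets are not updated atomically (see the workaround comments below).
 */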
3133 static void
3134 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3135 {
3136         unsigned int i;
3137         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3138         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3139
3140         /* Get rx/tx bytes of internal transfer packets */
3141         i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3142                                   I40E_GLV_GORCL(hw->port),
3143                                   pf->offset_loaded,
3144                                   &pf->internal_stats_offset.rx_bytes,
3145                                   &pf->internal_stats.rx_bytes,
3146                                   &pf->internal_prev_rx_bytes);
3147         i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3148                                   I40E_GLV_GOTCL(hw->port),
3149                                   pf->offset_loaded,
3150                                   &pf->internal_stats_offset.tx_bytes,
3151                                   &pf->internal_stats.tx_bytes,
3152                                   &pf->internal_prev_tx_bytes);
3153         /* Get total internal rx packet count */
3154         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3155                             I40E_GLV_UPRCL(hw->port),
3156                             pf->offset_loaded,
3157                             &pf->internal_stats_offset.rx_unicast,
3158                             &pf->internal_stats.rx_unicast);
3159         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3160                             I40E_GLV_MPRCL(hw->port),
3161                             pf->offset_loaded,
3162                             &pf->internal_stats_offset.rx_multicast,
3163                             &pf->internal_stats.rx_multicast);
3164         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3165                             I40E_GLV_BPRCL(hw->port),
3166                             pf->offset_loaded,
3167                             &pf->internal_stats_offset.rx_broadcast,
3168                             &pf->internal_stats.rx_broadcast);
3169         /* Get total internal tx packet count */
3170         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3171                             I40E_GLV_UPTCL(hw->port),
3172                             pf->offset_loaded,
3173                             &pf->internal_stats_offset.tx_unicast,
3174                             &pf->internal_stats.tx_unicast);
3175         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3176                             I40E_GLV_MPTCL(hw->port),
3177                             pf->offset_loaded,
3178                             &pf->internal_stats_offset.tx_multicast,
3179                             &pf->internal_stats.tx_multicast);
3180         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3181                             I40E_GLV_BPTCL(hw->port),
3182                             pf->offset_loaded,
3183                             &pf->internal_stats_offset.tx_broadcast,
3184                             &pf->internal_stats.tx_broadcast);
3185
3186         /* exclude CRC size */
3187         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3188                 pf->internal_stats.rx_multicast +
3189                 pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3190
3191         /* Get statistics of struct i40e_eth_stats */
3192         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3193                                   I40E_GLPRT_GORCL(hw->port),
3194                                   pf->offset_loaded, &os->eth.rx_bytes,
3195                                   &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3196         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3197                             I40E_GLPRT_UPRCL(hw->port),
3198                             pf->offset_loaded, &os->eth.rx_unicast,
3199                             &ns->eth.rx_unicast);
3200         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3201                             I40E_GLPRT_MPRCL(hw->port),
3202                             pf->offset_loaded, &os->eth.rx_multicast,
3203                             &ns->eth.rx_multicast);
3204         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3205                             I40E_GLPRT_BPRCL(hw->port),
3206                             pf->offset_loaded, &os->eth.rx_broadcast,
3207                             &ns->eth.rx_broadcast);
3208         /* Workaround: CRC size should not be included in byte statistics,
3209          * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3210          * packet.
3211          */
3212         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3213                 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3214
3215         /* exclude internal rx bytes
3216          * Workaround: it is possible I40E_GLV_GORC[H/L] is updated before
3217          * I40E_GLPRT_GORC[H/L], so there is a small window that can yield
3218          * a negative value.
3219          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
3220          */
3221         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3222                 ns->eth.rx_bytes = 0;
3223         else
3224                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3225
3226         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3227                 ns->eth.rx_unicast = 0;
3228         else
3229                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3230
3231         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3232                 ns->eth.rx_multicast = 0;
3233         else
3234                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3235
3236         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3237                 ns->eth.rx_broadcast = 0;
3238         else
3239                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3240
3241         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3242                             pf->offset_loaded, &os->eth.rx_discards,
3243                             &ns->eth.rx_discards);
3244         /* GLPRT_REPC not supported */
3245         /* GLPRT_RMPC not supported */
3246         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3247                             pf->offset_loaded,
3248                             &os->eth.rx_unknown_protocol,
3249                             &ns->eth.rx_unknown_protocol);
3250         i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3251                                   I40E_GLPRT_GOTCL(hw->port),
3252                                   pf->offset_loaded, &os->eth.tx_bytes,
3253                                   &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3254         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3255                             I40E_GLPRT_UPTCL(hw->port),
3256                             pf->offset_loaded, &os->eth.tx_unicast,
3257                             &ns->eth.tx_unicast);
3258         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3259                             I40E_GLPRT_MPTCL(hw->port),
3260                             pf->offset_loaded, &os->eth.tx_multicast,
3261                             &ns->eth.tx_multicast);
3262         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3263                             I40E_GLPRT_BPTCL(hw->port),
3264                             pf->offset_loaded, &os->eth.tx_broadcast,
3265                             &ns->eth.tx_broadcast);
3266         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3267                 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3268
3269         /* exclude internal tx bytes
3270          * Workaround: it is possible I40E_GLV_GOTC[H/L] is updated before
3271          * I40E_GLPRT_GOTC[H/L], so there is a small window that can yield
3272          * a negative value.
3273          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
3274          */
3275         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3276                 ns->eth.tx_bytes = 0;
3277         else
3278                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3279
3280         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3281                 ns->eth.tx_unicast = 0;
3282         else
3283                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3284
3285         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3286                 ns->eth.tx_multicast = 0;
3287         else
3288                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3289
3290         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3291                 ns->eth.tx_broadcast = 0;
3292         else
3293                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3294
3295         /* GLPRT_TEPC not supported */
3296
3297         /* additional port specific stats */
3298         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3299                             pf->offset_loaded, &os->tx_dropped_link_down,
3300                             &ns->tx_dropped_link_down);
3301         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3302                             pf->offset_loaded, &os->crc_errors,
3303                             &ns->crc_errors);
3304         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3305                             pf->offset_loaded, &os->illegal_bytes,
3306                             &ns->illegal_bytes);
3307         /* GLPRT_ERRBC not supported */
3308         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3309                             pf->offset_loaded, &os->mac_local_faults,
3310                             &ns->mac_local_faults);
3311         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3312                             pf->offset_loaded, &os->mac_remote_faults,
3313                             &ns->mac_remote_faults);
3314         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3315                             pf->offset_loaded, &os->rx_length_errors,
3316                             &ns->rx_length_errors);
3317         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3318                             pf->offset_loaded, &os->link_xon_rx,
3319                             &ns->link_xon_rx);
3320         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3321                             pf->offset_loaded, &os->link_xoff_rx,
3322                             &ns->link_xoff_rx);
3323         for (i = 0; i < 8; i++) {
3324                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3325                                     pf->offset_loaded,
3326                                     &os->priority_xon_rx[i],
3327                                     &ns->priority_xon_rx[i]);
3328                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3329                                     pf->offset_loaded,
3330                                     &os->priority_xoff_rx[i],
3331                                     &ns->priority_xoff_rx[i]);
3332         }
3333         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3334                             pf->offset_loaded, &os->link_xon_tx,
3335                             &ns->link_xon_tx);
3336         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3337                             pf->offset_loaded, &os->link_xoff_tx,
3338                             &ns->link_xoff_tx);
3339         for (i = 0; i < 8; i++) {
3340                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3341                                     pf->offset_loaded,
3342                                     &os->priority_xon_tx[i],
3343                                     &ns->priority_xon_tx[i]);
3344                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3345                                     pf->offset_loaded,
3346                                     &os->priority_xoff_tx[i],
3347                                     &ns->priority_xoff_tx[i]);
3348                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3349                                     pf->offset_loaded,
3350                                     &os->priority_xon_2_xoff[i],
3351                                     &ns->priority_xon_2_xoff[i]);
3352         }
3353         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3354                             I40E_GLPRT_PRC64L(hw->port),
3355                             pf->offset_loaded, &os->rx_size_64,
3356                             &ns->rx_size_64);
3357         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3358                             I40E_GLPRT_PRC127L(hw->port),
3359                             pf->offset_loaded, &os->rx_size_127,
3360                             &ns->rx_size_127);
3361         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3362                             I40E_GLPRT_PRC255L(hw->port),
3363                             pf->offset_loaded, &os->rx_size_255,
3364                             &ns->rx_size_255);
3365         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3366                             I40E_GLPRT_PRC511L(hw->port),
3367                             pf->offset_loaded, &os->rx_size_511,
3368                             &ns->rx_size_511);
3369         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3370                             I40E_GLPRT_PRC1023L(hw->port),
3371                             pf->offset_loaded, &os->rx_size_1023,
3372                             &ns->rx_size_1023);
3373         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3374                             I40E_GLPRT_PRC1522L(hw->port),
3375                             pf->offset_loaded, &os->rx_size_1522,
3376                             &ns->rx_size_1522);
3377         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3378                             I40E_GLPRT_PRC9522L(hw->port),
3379                             pf->offset_loaded, &os->rx_size_big,
3380                             &ns->rx_size_big);
3381         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3382                             pf->offset_loaded, &os->rx_undersize,
3383                             &ns->rx_undersize);
3384         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3385                             pf->offset_loaded, &os->rx_fragments,
3386                             &ns->rx_fragments);
3387         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3388                             pf->offset_loaded, &os->rx_oversize,
3389                             &ns->rx_oversize);
3390         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3391                             pf->offset_loaded, &os->rx_jabber,
3392                             &ns->rx_jabber);
3393         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3394                             I40E_GLPRT_PTC64L(hw->port),
3395                             pf->offset_loaded, &os->tx_size_64,
3396                             &ns->tx_size_64);
3397         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3398                             I40E_GLPRT_PTC127L(hw->port),
3399                             pf->offset_loaded, &os->tx_size_127,
3400                             &ns->tx_size_127);
3401         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3402                             I40E_GLPRT_PTC255L(hw->port),
3403                             pf->offset_loaded, &os->tx_size_255,
3404                             &ns->tx_size_255);
3405         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3406                             I40E_GLPRT_PTC511L(hw->port),
3407                             pf->offset_loaded, &os->tx_size_511,
3408                             &ns->tx_size_511);
3409         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3410                             I40E_GLPRT_PTC1023L(hw->port),
3411                             pf->offset_loaded, &os->tx_size_1023,
3412                             &ns->tx_size_1023);
3413         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3414                             I40E_GLPRT_PTC1522L(hw->port),
3415                             pf->offset_loaded, &os->tx_size_1522,
3416                             &ns->tx_size_1522);
3417         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3418                             I40E_GLPRT_PTC9522L(hw->port),
3419                             pf->offset_loaded, &os->tx_size_big,
3420                             &ns->tx_size_big);
3421         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3422                            pf->offset_loaded,
3423                            &os->fd_sb_match, &ns->fd_sb_match);
3424         /* GLPRT_MSPDC not supported */
3425         /* GLPRT_XEC not supported */
3426
3427         pf->offset_loaded = true;
3428
3429         if (pf->main_vsi)
3430                 i40e_update_vsi_stats(pf->main_vsi);
3431 }
3432
3433 /* Get all statistics of a port */
3434 static int
3435 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3436 {
3437         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3439         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3440         struct i40e_vsi *vsi;
3441         unsigned i;
3442
3443         /* read the registers to refresh the stats, then copy them to the caller's struct */
3444         i40e_read_stats_registers(pf, hw);
3445
3446         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3447                         pf->main_vsi->eth_stats.rx_multicast +
3448                         pf->main_vsi->eth_stats.rx_broadcast -
3449                         pf->main_vsi->eth_stats.rx_discards;
3450         stats->opackets = ns->eth.tx_unicast +
3451                         ns->eth.tx_multicast +
3452                         ns->eth.tx_broadcast;
3453         stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3454         stats->obytes   = ns->eth.tx_bytes;
3455         stats->oerrors  = ns->eth.tx_errors +
3456                         pf->main_vsi->eth_stats.tx_errors;
3457
3458         /* Rx Errors */
3459         stats->imissed  = ns->eth.rx_discards +
3460                         pf->main_vsi->eth_stats.rx_discards;
3461         stats->ierrors  = ns->crc_errors +
3462                         ns->rx_length_errors + ns->rx_undersize +
3463                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3464
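        /* Fold the per-VF VSI counters into the port-level totals. */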
3465         if (pf->vfs) {
3466                 for (i = 0; i < pf->vf_num; i++) {
3467                         vsi = pf->vfs[i].vsi;
3468                         i40e_update_vsi_stats(vsi);
3469
3470                         stats->ipackets += (vsi->eth_stats.rx_unicast +
3471                                         vsi->eth_stats.rx_multicast +
3472                                         vsi->eth_stats.rx_broadcast -
3473                                         vsi->eth_stats.rx_discards);
3474                         stats->ibytes   += vsi->eth_stats.rx_bytes;
3475                         stats->oerrors  += vsi->eth_stats.tx_errors;
3476                         stats->imissed  += vsi->eth_stats.rx_discards;
3477                 }
3478         }
3479
3480         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3481         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3482         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3483         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3484         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3485         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3486         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3487                     ns->eth.rx_unknown_protocol);
3488         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3489         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3490         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3491         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3492         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3493         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3494
3495         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3496                     ns->tx_dropped_link_down);
3497         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3498         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3499                     ns->illegal_bytes);
3500         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3501         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3502                     ns->mac_local_faults);
3503         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3504                     ns->mac_remote_faults);
3505         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3506                     ns->rx_length_errors);
3507         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3508         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3509         for (i = 0; i < 8; i++) {
3510                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3511                                 i, ns->priority_xon_rx[i]);
3512                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3513                                 i, ns->priority_xoff_rx[i]);
3514         }
3515         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3516         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3517         for (i = 0; i < 8; i++) {
3518                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3519                                 i, ns->priority_xon_tx[i]);
3520                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3521                                 i, ns->priority_xoff_tx[i]);
3522                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3523                                 i, ns->priority_xon_2_xoff[i]);
3524         }
3525         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3526         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3527         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3528         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3529         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3530         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3531         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3532         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3533         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3534         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3535         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3536         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3537         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3538         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3539         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3540         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3541         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3542         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3543         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3544                         ns->mac_short_packet_dropped);
3545         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3546                     ns->checksum_error);
3547         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3548         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3549         return 0;
3550 }
3551
3552 /* Reset the statistics */
3553 static int
3554 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3555 {
3556         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3557         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3558
3559         /* Mark PF and VSI stats to update the offset, aka "reset" */
3560         pf->offset_loaded = false;
3561         if (pf->main_vsi)
3562                 pf->main_vsi->offset_loaded = false;
3563
3564         /* read the stats; the current register values become the new offsets */
3565         i40e_read_stats_registers(pf, hw);
3566
3567         return 0;
3568 }
3569
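/*
 * Total xstats count: the port-level eth stats, the HW port stats, and one
 * entry per traffic class (8) for each per-priority Rx and Tx statistic.
 */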
3570 static uint32_t
3571 i40e_xstats_calc_num(void)
3572 {
3573         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3574                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3575                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3576 }
3577
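/*
 * Report the xstat names. A NULL xstats_names array is a size query:
 * return the number of entries the caller must allocate.
 */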
3578 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3579                                      struct rte_eth_xstat_name *xstats_names,
3580                                      __rte_unused unsigned limit)
3581 {
3582         unsigned count = 0;
3583         unsigned i, prio;
3584
3585         if (xstats_names == NULL)
3586                 return i40e_xstats_calc_num();
3587
3588         /* Note: limit checked in rte_eth_xstats_get_names() */
3589
3590         /* Get stats from i40e_eth_stats struct */
3591         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3592                 strlcpy(xstats_names[count].name,
3593                         rte_i40e_stats_strings[i].name,
3594                         sizeof(xstats_names[count].name));
3595                 count++;
3596         }
3597
3598         /* Get individual stats from i40e_hw_port struct */
3599         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3600                 strlcpy(xstats_names[count].name,
3601                         rte_i40e_hw_port_strings[i].name,
3602                         sizeof(xstats_names[count].name));
3603                 count++;
3604         }
3605
3606         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3607                 for (prio = 0; prio < 8; prio++) {
3608                         snprintf(xstats_names[count].name,
3609                                  sizeof(xstats_names[count].name),
3610                                  "rx_priority%u_%s", prio,
3611                                  rte_i40e_rxq_prio_strings[i].name);
3612                         count++;
3613                 }
3614         }
3615
3616         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3617                 for (prio = 0; prio < 8; prio++) {
3618                         snprintf(xstats_names[count].name,
3619                                  sizeof(xstats_names[count].name),
3620                                  "tx_priority%u_%s", prio,
3621                                  rte_i40e_txq_prio_strings[i].name);
3622                         count++;
3623                 }
3624         }
3625         return count;
3626 }
3627
3628 static int
3629 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3630                     unsigned n)
3631 {
3632         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3633         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3634         unsigned i, count, prio;
3635         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3636
3637         count = i40e_xstats_calc_num();
3638         if (n < count)
3639                 return count;
3640
3641         i40e_read_stats_registers(pf, hw);
3642
3643         if (xstats == NULL)
3644                 return 0;
3645
3646         count = 0;
3647
3648         /* Get stats from i40e_eth_stats struct */
3649         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3650                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3651                         rte_i40e_stats_strings[i].offset);
3652                 xstats[count].id = count;
3653                 count++;
3654         }
3655
3656         /* Get individual stats from i40e_hw_port struct */
3657         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3658                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3659                         rte_i40e_hw_port_strings[i].offset);
3660                 xstats[count].id = count;
3661                 count++;
3662         }
3663
3664         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3665                 for (prio = 0; prio < 8; prio++) {
3666                         xstats[count].value =
3667                                 *(uint64_t *)(((char *)hw_stats) +
3668                                 rte_i40e_rxq_prio_strings[i].offset +
3669                                 (sizeof(uint64_t) * prio));
3670                         xstats[count].id = count;
3671                         count++;
3672                 }
3673         }
3674
3675         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3676                 for (prio = 0; prio < 8; prio++) {
3677                         xstats[count].value =
3678                                 *(uint64_t *)(((char *)hw_stats) +
3679                                 rte_i40e_txq_prio_strings[i].offset +
3680                                 (sizeof(uint64_t) * prio));
3681                         xstats[count].id = count;
3682                         count++;
3683                 }
3684         }
3685
3686         return count;
3687 }
3688
3689 static int
3690 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3691 {
3692         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3693         u32 full_ver;
3694         u8 ver, patch;
3695         u16 build;
3696         int ret;
3697
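        /*
         * As decoded below, oem_ver packs: bits 31:24 major version,
         * bits 23:8 build number, bits 7:0 patch level.
         */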
3698         full_ver = hw->nvm.oem_ver;
3699         ver = (u8)(full_ver >> 24);
3700         build = (u16)((full_ver >> 8) & 0xffff);
3701         patch = (u8)(full_ver & 0xff);
3702
3703         ret = snprintf(fw_version, fw_size,
3704                  "%d.%d%d 0x%08x %d.%d.%d",
3705                  ((hw->nvm.version >> 12) & 0xf),
3706                  ((hw->nvm.version >> 4) & 0xff),
3707                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3708                  ver, build, patch);
3709
3710         ret += 1; /* account for the terminating '\0' */
3711         if (fw_size < (u32)ret)
3712                 return ret;
3713         else
3714                 return 0;
3715 }
3716
3717 /*
3718  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3719  * the Rx data path does not hang if FW LLDP is stopped.
3720  * Return true if LLDP needs to be stopped.
3721  * Return false if LLDP cannot be disabled without blocking the Rx data path.
3722  */
3723 static bool
3724 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3725 {
3726         double nvm_ver;
3727         char ver_str[64] = {0};
3728         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3729
3730         i40e_fw_version_get(dev, ver_str, 64);
3731         nvm_ver = atof(ver_str);
3732         if ((hw->mac.type == I40E_MAC_X722 ||
3733              hw->mac.type == I40E_MAC_X722_VF) &&
3734              ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3735                 return true;
3736         else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3737                 return true;
3738
3739         return false;
3740 }
3741
3742 static int
3743 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3744 {
3745         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3746         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3747         struct i40e_vsi *vsi = pf->main_vsi;
3748         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3749
3750         dev_info->max_rx_queues = vsi->nb_qps;
3751         dev_info->max_tx_queues = vsi->nb_qps;
3752         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3753         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3754         dev_info->max_mac_addrs = vsi->max_macaddrs;
3755         dev_info->max_vfs = pci_dev->max_vfs;
3756         dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3757         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3758         dev_info->rx_queue_offload_capa = 0;
3759         dev_info->rx_offload_capa =
3760                 DEV_RX_OFFLOAD_VLAN_STRIP |
3761                 DEV_RX_OFFLOAD_QINQ_STRIP |
3762                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3763                 DEV_RX_OFFLOAD_UDP_CKSUM |
3764                 DEV_RX_OFFLOAD_TCP_CKSUM |
3765                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3766                 DEV_RX_OFFLOAD_KEEP_CRC |
3767                 DEV_RX_OFFLOAD_SCATTER |
3768                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3769                 DEV_RX_OFFLOAD_VLAN_FILTER |
3770                 DEV_RX_OFFLOAD_JUMBO_FRAME |
3771                 DEV_RX_OFFLOAD_RSS_HASH;
3772
3773         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3774         dev_info->tx_offload_capa =
3775                 DEV_TX_OFFLOAD_VLAN_INSERT |
3776                 DEV_TX_OFFLOAD_QINQ_INSERT |
3777                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3778                 DEV_TX_OFFLOAD_UDP_CKSUM |
3779                 DEV_TX_OFFLOAD_TCP_CKSUM |
3780                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3781                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3782                 DEV_TX_OFFLOAD_TCP_TSO |
3783                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3784                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3785                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3786                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3787                 DEV_TX_OFFLOAD_MULTI_SEGS |
3788                 dev_info->tx_queue_offload_capa;
3789         dev_info->dev_capa =
3790                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3791                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3792
3793         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3794                                                 sizeof(uint32_t);
3795         dev_info->reta_size = pf->hash_lut_size;
3796         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3797
3798         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3799                 .rx_thresh = {
3800                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3801                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3802                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3803                 },
3804                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3805                 .rx_drop_en = 0,
3806                 .offloads = 0,
3807         };
3808
3809         dev_info->default_txconf = (struct rte_eth_txconf) {
3810                 .tx_thresh = {
3811                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3812                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3813                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3814                 },
3815                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3816                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3817                 .offloads = 0,
3818         };
3819
3820         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3821                 .nb_max = I40E_MAX_RING_DESC,
3822                 .nb_min = I40E_MIN_RING_DESC,
3823                 .nb_align = I40E_ALIGN_RING_DESC,
3824         };
3825
3826         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3827                 .nb_max = I40E_MAX_RING_DESC,
3828                 .nb_min = I40E_MIN_RING_DESC,
3829                 .nb_align = I40E_ALIGN_RING_DESC,
3830                 .nb_seg_max = I40E_TX_MAX_SEG,
3831                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3832         };
3833
3834         if (pf->flags & I40E_FLAG_VMDQ) {
3835                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3836                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3837                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3838                                                 pf->max_nb_vmdq_vsi;
3839                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3840                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3841                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3842         }
3843
3844         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3845                 /* For XL710 */
3846                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3847                 dev_info->default_rxportconf.nb_queues = 2;
3848                 dev_info->default_txportconf.nb_queues = 2;
3849                 if (dev->data->nb_rx_queues == 1)
3850                         dev_info->default_rxportconf.ring_size = 2048;
3851                 else
3852                         dev_info->default_rxportconf.ring_size = 1024;
3853                 if (dev->data->nb_tx_queues == 1)
3854                         dev_info->default_txportconf.ring_size = 1024;
3855                 else
3856                         dev_info->default_txportconf.ring_size = 512;
3857
3858         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3859                 /* For XXV710 */
3860                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3861                 dev_info->default_rxportconf.nb_queues = 1;
3862                 dev_info->default_txportconf.nb_queues = 1;
3863                 dev_info->default_rxportconf.ring_size = 256;
3864                 dev_info->default_txportconf.ring_size = 256;
3865         } else {
3866                 /* For X710 */
3867                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3868                 dev_info->default_rxportconf.nb_queues = 1;
3869                 dev_info->default_txportconf.nb_queues = 1;
3870                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3871                         dev_info->default_rxportconf.ring_size = 512;
3872                         dev_info->default_txportconf.ring_size = 256;
3873                 } else {
3874                         dev_info->default_rxportconf.ring_size = 256;
3875                         dev_info->default_txportconf.ring_size = 256;
3876                 }
3877         }
3878         dev_info->default_rxportconf.burst_size = 32;
3879         dev_info->default_txportconf.burst_size = 32;
3880
3881         return 0;
3882 }
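
/*
 * Illustrative caller sketch, not part of the driver: pick up the
 * per-NIC preferred ring sizes reported above and clamp them to the
 * descriptor limits.  The port_id is assumed to be valid.
 */
static int
ring_defaults_usage_sketch(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	uint16_t nb_rxd, nb_txd;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	nb_rxd = info.default_rxportconf.ring_size; /* 2048/1024/512/256 above */
	nb_txd = info.default_txportconf.ring_size;
	/* Round to the nb_min/nb_max/nb_align limits also reported above */
	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
}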
3883
3884 static int
3885 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3886 {
3887         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3888         struct i40e_vsi *vsi = pf->main_vsi;
3889         PMD_INIT_FUNC_TRACE();
3890
3891         if (on)
3892                 return i40e_vsi_add_vlan(vsi, vlan_id);
3893         else
3894                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3895 }
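
/*
 * Illustrative usage sketch, not part of the driver: the ethdev call
 * that lands in the handler above.  Adds VLAN ID 100 (a hypothetical
 * value) to the main VSI filter; DEV_RX_OFFLOAD_VLAN_FILTER must have
 * been enabled at configure time.
 */
static int
vlan_filter_usage_sketch(uint16_t port_id)
{
	return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}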
3896
3897 static int
3898 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3899                                 enum rte_vlan_type vlan_type,
3900                                 uint16_t tpid, int qinq)
3901 {
3902         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3903         uint64_t reg_r = 0;
3904         uint64_t reg_w = 0;
3905         uint16_t reg_id = 3;
3906         int ret;
3907
3908         if (qinq) {
3909                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3910                         reg_id = 2;
3911         }
3912
3913         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3914                                           &reg_r, NULL);
3915         if (ret != I40E_SUCCESS) {
3916                 PMD_DRV_LOG(ERR,
3917                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3918                            reg_id);
3919                 return -EIO;
3920         }
3921         PMD_DRV_LOG(DEBUG,
3922                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3923                     reg_id, reg_r);
3924
3925         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3926         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3927         if (reg_r == reg_w) {
3928                 PMD_DRV_LOG(DEBUG, "No need to write");
3929                 return 0;
3930         }
3931
3932         ret = i40e_aq_debug_write_global_register(hw,
3933                                            I40E_GL_SWT_L2TAGCTRL(reg_id),
3934                                            reg_w, NULL);
3935         if (ret != I40E_SUCCESS) {
3936                 PMD_DRV_LOG(ERR,
3937                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3938                             reg_id);
3939                 return -EIO;
3940         }
3941         PMD_DRV_LOG(DEBUG,
3942                     "Global register 0x%08x is changed with value 0x%08x",
3943                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3944
3945         return 0;
3946 }
3947
3948 static int
3949 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3950                    enum rte_vlan_type vlan_type,
3951                    uint16_t tpid)
3952 {
3953         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3954         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3955         int qinq = dev->data->dev_conf.rxmode.offloads &
3956                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3957         int ret = 0;
3958
3959         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3960              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3961             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3962                 PMD_DRV_LOG(ERR,
3963                             "Unsupported vlan type.");
3964                 return -EINVAL;
3965         }
3966
3967         if (pf->support_multi_driver) {
3968                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3969                 return -ENOTSUP;
3970         }
3971
3972         /* Support for 802.1ad frames was added in NVM API 1.7 */
3973         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3974                 if (qinq) {
3975                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3976                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3977                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3978                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3979                 } else {
3980                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3981                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3982                 }
3983                 ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3984                 if (ret != I40E_SUCCESS) {
3985                         PMD_DRV_LOG(ERR,
3986                                     "Set switch config failed aq_err: %d",
3987                                     hw->aq.asq_last_status);
3988                         ret = -EIO;
3989                 }
3990         } else
3991                 /* If NVM API < 1.7, keep the register setting */
3992                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3993                                                       tpid, qinq);
3994
3995         return ret;
3996 }
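
/*
 * Illustrative usage sketch, not part of the driver: select 802.1ad
 * (0x88A8) as the outer TPID through the ethdev wrapper that lands in
 * the handler above.  Assumes DEV_RX_OFFLOAD_VLAN_EXTEND (QinQ) was
 * enabled at configure time, as required by the checks above.
 */
static int
outer_tpid_usage_sketch(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
					       RTE_ETHER_TYPE_QINQ);
}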
3997
3998 /* Configure outer vlan stripping on or off in QinQ mode */
3999 static int
4000 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
4001 {
4002         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4003         int ret = I40E_SUCCESS;
4004         uint32_t reg;
4005
4006         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
4007                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
4008                 return -EINVAL;
4009         }
4010
4011         /* Configure for outer VLAN RX stripping */
4012         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
4013
4014         if (on)
4015                 reg |= I40E_VSI_TSR_QINQ_STRIP;
4016         else
4017                 reg &= ~I40E_VSI_TSR_QINQ_STRIP;
4018
4019         ret = i40e_aq_debug_write_register(hw,
4020                                                    I40E_VSI_TSR(vsi->vsi_id),
4021                                                    reg, NULL);
4022         if (ret < 0) {
4023                 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
4024                                     vsi->vsi_id);
4025                 return I40E_ERR_CONFIG;
4026         }
4027
4028         return ret;
4029 }
4030
4031 static int
4032 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4033 {
4034         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4035         struct i40e_vsi *vsi = pf->main_vsi;
4036         struct rte_eth_rxmode *rxmode;
4037
4038         rxmode = &dev->data->dev_conf.rxmode;
4039         if (mask & ETH_VLAN_FILTER_MASK) {
4040                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4041                         i40e_vsi_config_vlan_filter(vsi, TRUE);
4042                 else
4043                         i40e_vsi_config_vlan_filter(vsi, FALSE);
4044         }
4045
4046         if (mask & ETH_VLAN_STRIP_MASK) {
4047                 /* Enable or disable VLAN stripping */
4048                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4049                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
4050                 else
4051                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
4052         }
4053
4054         if (mask & ETH_VLAN_EXTEND_MASK) {
4055                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4056                         i40e_vsi_config_double_vlan(vsi, TRUE);
4057                         /* Set global registers with default ethertype. */
4058                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4059                                            RTE_ETHER_TYPE_VLAN);
4060                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4061                                            RTE_ETHER_TYPE_VLAN);
4062                 }
4063                 else
4064                         i40e_vsi_config_double_vlan(vsi, FALSE);
4065         }
4066
4067         if (mask & ETH_QINQ_STRIP_MASK) {
4068                 /* Enable or disable outer VLAN stripping */
4069                 if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4070                         i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4071                 else
4072                         i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4073         }
4074
4075         return 0;
4076 }
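
/*
 * Illustrative usage sketch, not part of the driver: toggle VLAN
 * stripping at runtime, testpmd-style.  rte_eth_dev_set_vlan_offload()
 * diffs the requested flags against the current configuration and
 * calls the handler above with only the changed bits in the mask.
 */
static int
vlan_strip_usage_sketch(uint16_t port_id, int on)
{
	int flags = rte_eth_dev_get_vlan_offload(port_id);

	if (flags < 0)
		return flags;
	if (on)
		flags |= ETH_VLAN_STRIP_OFFLOAD;
	else
		flags &= ~ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, flags);
}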
4077
4078 static void
4079 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4080                           __rte_unused uint16_t queue,
4081                           __rte_unused int on)
4082 {
4083         PMD_INIT_FUNC_TRACE();
4084 }
4085
4086 static int
4087 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4088 {
4089         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4090         struct i40e_vsi *vsi = pf->main_vsi;
4091         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4092         struct i40e_vsi_vlan_pvid_info info;
4093
4094         memset(&info, 0, sizeof(info));
4095         info.on = on;
4096         if (info.on)
4097                 info.config.pvid = pvid;
4098         else {
4099                 info.config.reject.tagged =
4100                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
4101                 info.config.reject.untagged =
4102                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
4103         }
4104
4105         return i40e_vsi_vlan_pvid_set(vsi, &info);
4106 }
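
/*
 * Illustrative usage sketch, not part of the driver: set port VLAN 10
 * (a hypothetical value) as PVID.  When on == 0, the reject-tagged and
 * reject-untagged settings are taken from txmode, as the handler above
 * shows.
 */
static int
pvid_usage_sketch(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_pvid(port_id, 10, 1 /* on */);
}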
4107
4108 static int
4109 i40e_dev_led_on(struct rte_eth_dev *dev)
4110 {
4111         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4112         uint32_t mode = i40e_led_get(hw);
4113
4114         if (mode == 0)
4115                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
4116
4117         return 0;
4118 }
4119
4120 static int
4121 i40e_dev_led_off(struct rte_eth_dev *dev)
4122 {
4123         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4124         uint32_t mode = i40e_led_get(hw);
4125
4126         if (mode != 0)
4127                 i40e_led_set(hw, 0, false);
4128
4129         return 0;
4130 }
4131
4132 static int
4133 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4134 {
4135         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4136         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4137
4138         fc_conf->pause_time = pf->fc_conf.pause_time;
4139
4140         /* read out from the registers, in case they were modified by another port */
4141         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4142                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4143         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4144                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4145
4146         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4147         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4148
4149         /* Return the current mode according to the actual setting */
4150         switch (hw->fc.current_mode) {
4151         case I40E_FC_FULL:
4152                 fc_conf->mode = RTE_FC_FULL;
4153                 break;
4154         case I40E_FC_TX_PAUSE:
4155                 fc_conf->mode = RTE_FC_TX_PAUSE;
4156                 break;
4157         case I40E_FC_RX_PAUSE:
4158                 fc_conf->mode = RTE_FC_RX_PAUSE;
4159                 break;
4160         case I40E_FC_NONE:
4161         default:
4162                 fc_conf->mode = RTE_FC_NONE;
4163         };
4164
4165         return 0;
4166 }
4167
4168 static int
4169 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4170 {
4171         uint32_t mflcn_reg, fctrl_reg, reg;
4172         uint32_t max_high_water;
4173         uint8_t i, aq_failure;
4174         int err;
4175         struct i40e_hw *hw;
4176         struct i40e_pf *pf;
4177         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4178                 [RTE_FC_NONE] = I40E_FC_NONE,
4179                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4180                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4181                 [RTE_FC_FULL] = I40E_FC_FULL
4182         };
4183
4184         /* The high_water field in rte_eth_fc_conf is in kilobytes */
4185
4186         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4187         if ((fc_conf->high_water > max_high_water) ||
4188                         (fc_conf->high_water < fc_conf->low_water)) {
4189                 PMD_INIT_LOG(ERR,
4190                         "Invalid high/low water setup value in KB; high_water must be <= %d and >= low_water.",
4191                         max_high_water);
4192                 return -EINVAL;
4193         }
4194
4195         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4196         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4197         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4198
4199         pf->fc_conf.pause_time = fc_conf->pause_time;
4200         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4201         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4202
4203         PMD_INIT_FUNC_TRACE();
4204
4205         /* All the link flow control related enable/disable register
4206          * configuration is handled by the F/W
4207          */
4208         err = i40e_set_fc(hw, &aq_failure, true);
4209         if (err < 0)
4210                 return -ENOSYS;
4211
4212         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4213                 /* Configure flow control refresh threshold,
4214                  * the value for stat_tx_pause_refresh_timer[8]
4215                  * is used for global pause operation.
4216                  */
4217
4218                 I40E_WRITE_REG(hw,
4219                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4220                                pf->fc_conf.pause_time);
4221
4222                 /* configure the timer value included in transmitted pause
4223                  * frame,
4224                  * the value for stat_tx_pause_quanta[8] is used for global
4225                  * pause operation
4226                  */
4227                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4228                                pf->fc_conf.pause_time);
4229
4230                 fctrl_reg = I40E_READ_REG(hw,
4231                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4232
4233                 if (fc_conf->mac_ctrl_frame_fwd != 0)
4234                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4235                 else
4236                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4237
4238                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4239                                fctrl_reg);
4240         } else {
4241                 /* Configure pause time (2 TCs per register) */
4242                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4243                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4244                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4245
4246                 /* Configure flow control refresh threshold value */
4247                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4248                                pf->fc_conf.pause_time / 2);
4249
4250                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4251
4252                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
4253                  * depending on configuration
4254                  */
4255                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
4256                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4257                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4258                 } else {
4259                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4260                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4261                 }
4262
4263                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4264         }
4265
4266         if (!pf->support_multi_driver) {
4267                 /* configure watermarks based on both packets and bytes */
4268                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4269                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4270                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4271                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4272                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4273                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4274                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4275                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4276                                   << I40E_KILOSHIFT);
4277                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4278                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4279                                    << I40E_KILOSHIFT);
4280         } else {
4281                 PMD_DRV_LOG(ERR,
4282                             "Watermark configuration is not supported.");
4283         }
4284
4285         I40E_WRITE_FLUSH(hw);
4286
4287         return 0;
4288 }
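
/*
 * Illustrative usage sketch, not part of the driver: request full link
 * flow control.  Watermarks are in kilobytes, as required by the check
 * above (high_water <= I40E_RXPBSIZE >> I40E_KILOSHIFT, i.e. 968 KB,
 * and high_water >= low_water); the numeric values here are
 * hypothetical.
 */
static int
flow_ctrl_usage_sketch(uint16_t port_id)
{
	struct rte_eth_fc_conf fc;
	int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);

	if (ret != 0)
		return ret;
	fc.mode = RTE_FC_FULL;
	fc.high_water = 600;	/* KB */
	fc.low_water = 300;	/* KB */
	fc.pause_time = I40E_DEFAULT_PAUSE_TIME;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}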
4289
4290 static int
4291 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4292                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4293 {
4294         PMD_INIT_FUNC_TRACE();
4295
4296         return -ENOSYS;
4297 }
4298
4299 /* Add a MAC address, and update filters */
4300 static int
4301 i40e_macaddr_add(struct rte_eth_dev *dev,
4302                  struct rte_ether_addr *mac_addr,
4303                  __rte_unused uint32_t index,
4304                  uint32_t pool)
4305 {
4306         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4307         struct i40e_mac_filter_info mac_filter;
4308         struct i40e_vsi *vsi;
4309         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4310         int ret;
4311
4312         /* If VMDQ not enabled or configured, return */
4313         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4314                           !pf->nb_cfg_vmdq_vsi)) {
4315                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4316                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4317                         pool);
4318                 return -ENOTSUP;
4319         }
4320
4321         if (pool > pf->nb_cfg_vmdq_vsi) {
4322                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4323                                 pool, pf->nb_cfg_vmdq_vsi);
4324                 return -EINVAL;
4325         }
4326
4327         rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4328         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4329                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4330         else
4331                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
4332
4333         if (pool == 0)
4334                 vsi = pf->main_vsi;
4335         else
4336                 vsi = pf->vmdq[pool - 1].vsi;
4337
4338         ret = i40e_vsi_add_mac(vsi, &mac_filter);
4339         if (ret != I40E_SUCCESS) {
4340                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4341                 return -ENODEV;
4342         }
4343         return 0;
4344 }
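
/*
 * Illustrative usage sketch, not part of the driver: add a locally
 * administered unicast MAC (hypothetical bytes) to VMDq pool 1; pool 0
 * addresses the main VSI, as the handler above shows.
 */
static int
macaddr_add_usage_sketch(uint16_t port_id)
{
	struct rte_ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	return rte_eth_dev_mac_addr_add(port_id, &mac, 1);
}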
4345
4346 /* Remove a MAC address, and update filters */
4347 static void
4348 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4349 {
4350         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4351         struct i40e_vsi *vsi;
4352         struct rte_eth_dev_data *data = dev->data;
4353         struct rte_ether_addr *macaddr;
4354         int ret;
4355         uint32_t i;
4356         uint64_t pool_sel;
4357
4358         macaddr = &(data->mac_addrs[index]);
4359
4360         pool_sel = dev->data->mac_pool_sel[index];
4361
4362         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4363                 if (pool_sel & (1ULL << i)) {
4364                         if (i == 0)
4365                                 vsi = pf->main_vsi;
4366                         else {
4367                                 /* No VMDQ pool enabled or configured */
4368                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
4369                                         (i > pf->nb_cfg_vmdq_vsi)) {
4370                                         PMD_DRV_LOG(ERR,
4371                                                 "No VMDQ pool enabled/configured");
4372                                         return;
4373                                 }
4374                                 vsi = pf->vmdq[i - 1].vsi;
4375                         }
4376                         ret = i40e_vsi_delete_mac(vsi, macaddr);
4377
4378                         if (ret) {
4379                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4380                                 return;
4381                         }
4382                 }
4383         }
4384 }
4385
4386 /* Set perfect match or hash match of MAC and VLAN for a VF */
4387 static int
4388 i40e_vf_mac_filter_set(struct i40e_pf *pf,
4389                  struct rte_eth_mac_filter *filter,
4390                  bool add)
4391 {
4392         struct i40e_hw *hw;
4393         struct i40e_mac_filter_info mac_filter;
4394         struct rte_ether_addr old_mac;
4395         struct rte_ether_addr *new_mac;
4396         struct i40e_pf_vf *vf = NULL;
4397         uint16_t vf_id;
4398         int ret;
4399
4400         if (pf == NULL) {
4401                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
4402                 return -EINVAL;
4403         }
4404         hw = I40E_PF_TO_HW(pf);
4405
4406         if (filter == NULL) {
4407                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
4408                 return -EINVAL;
4409         }
4410
4411         new_mac = &filter->mac_addr;
4412
4413         if (rte_is_zero_ether_addr(new_mac)) {
4414                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
4415                 return -EINVAL;
4416         }
4417
4418         vf_id = filter->dst_id;
4419
4420         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
4421                 PMD_DRV_LOG(ERR, "Invalid argument.");
4422                 return -EINVAL;
4423         }
4424         vf = &pf->vfs[vf_id];
4425
4426         if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) {
4427                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
4428                 return -EINVAL;
4429         }
4430
4431         if (add) {
4432                 rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
4433                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
4434                                 RTE_ETHER_ADDR_LEN);
4435                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
4436                                  RTE_ETHER_ADDR_LEN);
4437
4438                 mac_filter.filter_type = filter->filter_type;
4439                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
4440                 if (ret != I40E_SUCCESS) {
4441                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
4442                         return -1;
4443                 }
4444                 rte_ether_addr_copy(new_mac, &pf->dev_addr);
4445         } else {
4446                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
4447                                 RTE_ETHER_ADDR_LEN);
4448                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
4449                 if (ret != I40E_SUCCESS) {
4450                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
4451                         return -1;
4452                 }
4453
4454                 /* Clear device address as it has been removed */
4455                 if (rte_is_same_ether_addr(&pf->dev_addr, new_mac))
4456                         memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr));
4457         }
4458
4459         return 0;
4460 }
4461
4462 /* MAC filter handle */
4463 static int
4464 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4465                 void *arg)
4466 {
4467         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4468         struct rte_eth_mac_filter *filter;
4469         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4470         int ret = I40E_NOT_SUPPORTED;
4471
4472         filter = (struct rte_eth_mac_filter *)(arg);
4473
4474         switch (filter_op) {
4475         case RTE_ETH_FILTER_NOP:
4476                 ret = I40E_SUCCESS;
4477                 break;
4478         case RTE_ETH_FILTER_ADD:
4479                 i40e_pf_disable_irq0(hw);
4480                 if (filter->is_vf)
4481                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
4482                 i40e_pf_enable_irq0(hw);
4483                 break;
4484         case RTE_ETH_FILTER_DELETE:
4485                 i40e_pf_disable_irq0(hw);
4486                 if (filter->is_vf)
4487                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
4488                 i40e_pf_enable_irq0(hw);
4489                 break;
4490         default:
4491                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4492                 ret = I40E_ERR_PARAM;
4493                 break;
4494         }
4495
4496         return ret;
4497 }
4498
4499 static int
4500 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4501 {
4502         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4503         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4504         uint32_t reg;
4505         int ret;
4506
4507         if (!lut)
4508                 return -EINVAL;
4509
4510         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4511                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4512                                           vsi->type != I40E_VSI_SRIOV,
4513                                           lut, lut_size);
4514                 if (ret) {
4515                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4516                         return ret;
4517                 }
4518         } else {
4519                 uint32_t *lut_dw = (uint32_t *)lut;
4520                 uint16_t i, lut_size_dw = lut_size / 4;
4521
4522                 if (vsi->type == I40E_VSI_SRIOV) {
4523                         for (i = 0; i < lut_size_dw; i++) {
4524                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4525                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4526                         }
4527                 } else {
4528                         for (i = 0; i < lut_size_dw; i++)
4529                                 lut_dw[i] = I40E_READ_REG(hw,
4530                                                           I40E_PFQF_HLUT(i));
4531                 }
4532         }
4533
4534         return 0;
4535 }
4536
4537 int
4538 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4539 {
4540         struct i40e_pf *pf;
4541         struct i40e_hw *hw;
4542         int ret;
4543
4544         if (!vsi || !lut)
4545                 return -EINVAL;
4546
4547         pf = I40E_VSI_TO_PF(vsi);
4548         hw = I40E_VSI_TO_HW(vsi);
4549
4550         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4551                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4552                                           vsi->type != I40E_VSI_SRIOV,
4553                                           lut, lut_size);
4554                 if (ret) {
4555                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4556                         return ret;
4557                 }
4558         } else {
4559                 uint32_t *lut_dw = (uint32_t *)lut;
4560                 uint16_t i, lut_size_dw = lut_size / 4;
4561
4562                 if (vsi->type == I40E_VSI_SRIOV) {
4563                         for (i = 0; i < lut_size_dw; i++)
4564                                 I40E_WRITE_REG(
4565                                         hw,
4566                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4567                                         lut_dw[i]);
4568                 } else {
4569                         for (i = 0; i < lut_size_dw; i++)
4570                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4571                                                lut_dw[i]);
4572                 }
4573                 I40E_WRITE_FLUSH(hw);
4574         }
4575
4576         return 0;
4577 }
4578
4579 static int
4580 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4581                          struct rte_eth_rss_reta_entry64 *reta_conf,
4582                          uint16_t reta_size)
4583 {
4584         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4585         uint16_t i, lut_size = pf->hash_lut_size;
4586         uint16_t idx, shift;
4587         uint8_t *lut;
4588         int ret;
4589
4590         if (reta_size != lut_size ||
4591                 reta_size > ETH_RSS_RETA_SIZE_512) {
4592                 PMD_DRV_LOG(ERR,
4593                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4594                         reta_size, lut_size);
4595                 return -EINVAL;
4596         }
4597
4598         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4599         if (!lut) {
4600                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4601                 return -ENOMEM;
4602         }
4603         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4604         if (ret)
4605                 goto out;
4606         for (i = 0; i < reta_size; i++) {
4607                 idx = i / RTE_RETA_GROUP_SIZE;
4608                 shift = i % RTE_RETA_GROUP_SIZE;
4609                 if (reta_conf[idx].mask & (1ULL << shift))
4610                         lut[i] = reta_conf[idx].reta[shift];
4611         }
4612         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4613
4614         pf->adapter->rss_reta_updated = 1;
4615
4616 out:
4617         rte_free(lut);
4618
4619         return ret;
4620 }
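
/*
 * Illustrative usage sketch, not part of the driver: fill the
 * redirection table round-robin over nb_q queues (nb_q > 0 assumed).
 * reta_size must equal pf->hash_lut_size, as checked above; each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries and
 * only entries whose mask bit is set are applied.
 */
static int
reta_update_usage_sketch(uint16_t port_id, uint16_t reta_size, uint16_t nb_q)
{
	struct rte_eth_rss_reta_entry64 conf[ETH_RSS_RETA_SIZE_512 /
					     RTE_RETA_GROUP_SIZE];
	uint16_t i, idx, shift;

	memset(conf, 0, sizeof(conf));
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		conf[idx].mask |= 1ULL << shift;
		conf[idx].reta[shift] = i % nb_q;
	}
	return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
}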
4621
4622 static int
4623 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4624                         struct rte_eth_rss_reta_entry64 *reta_conf,
4625                         uint16_t reta_size)
4626 {
4627         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4628         uint16_t i, lut_size = pf->hash_lut_size;
4629         uint16_t idx, shift;
4630         uint8_t *lut;
4631         int ret;
4632
4633         if (reta_size != lut_size ||
4634                 reta_size > ETH_RSS_RETA_SIZE_512) {
4635                 PMD_DRV_LOG(ERR,
4636                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4637                         reta_size, lut_size);
4638                 return -EINVAL;
4639         }
4640
4641         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4642         if (!lut) {
4643                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4644                 return -ENOMEM;
4645         }
4646
4647         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4648         if (ret)
4649                 goto out;
4650         for (i = 0; i < reta_size; i++) {
4651                 idx = i / RTE_RETA_GROUP_SIZE;
4652                 shift = i % RTE_RETA_GROUP_SIZE;
4653                 if (reta_conf[idx].mask & (1ULL << shift))
4654                         reta_conf[idx].reta[shift] = lut[i];
4655         }
4656
4657 out:
4658         rte_free(lut);
4659
4660         return ret;
4661 }
4662
4663 /**
4664  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4665  * @hw:   pointer to the HW structure
4666  * @mem:  pointer to mem struct to fill out
4667  * @size: size of memory requested
4668  * @alignment: what to align the allocation to
4669  **/
4670 enum i40e_status_code
4671 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4672                         struct i40e_dma_mem *mem,
4673                         u64 size,
4674                         u32 alignment)
4675 {
4676         const struct rte_memzone *mz = NULL;
4677         char z_name[RTE_MEMZONE_NAMESIZE];
4678
4679         if (!mem)
4680                 return I40E_ERR_PARAM;
4681
4682         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4683         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4684                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4685         if (!mz)
4686                 return I40E_ERR_NO_MEMORY;
4687
4688         mem->size = size;
4689         mem->va = mz->addr;
4690         mem->pa = mz->iova;
4691         mem->zone = (const void *)mz;
4692         PMD_DRV_LOG(DEBUG,
4693                 "memzone %s allocated with physical address: %"PRIu64,
4694                 mz->name, mem->pa);
4695
4696         return I40E_SUCCESS;
4697 }
4698
4699 /**
4700  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4701  * @hw:   pointer to the HW structure
4702  * @mem:  ptr to mem struct to free
4703  **/
4704 enum i40e_status_code
4705 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4706                     struct i40e_dma_mem *mem)
4707 {
4708         if (!mem)
4709                 return I40E_ERR_PARAM;
4710
4711         PMD_DRV_LOG(DEBUG,
4712                 "memzone %s to be freed with physical address: %"PRIu64,
4713                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4714         rte_memzone_free((const struct rte_memzone *)mem->zone);
4715         mem->zone = NULL;
4716         mem->va = NULL;
4717         mem->pa = (u64)0;
4718
4719         return I40E_SUCCESS;
4720 }
4721
4722 /**
4723  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4724  * @hw:   pointer to the HW structure
4725  * @mem:  pointer to mem struct to fill out
4726  * @size: size of memory requested
4727  **/
4728 enum i40e_status_code
4729 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4730                          struct i40e_virt_mem *mem,
4731                          u32 size)
4732 {
4733         if (!mem)
4734                 return I40E_ERR_PARAM;
4735
4736         mem->size = size;
4737         mem->va = rte_zmalloc("i40e", size, 0);
4738
4739         if (mem->va)
4740                 return I40E_SUCCESS;
4741         else
4742                 return I40E_ERR_NO_MEMORY;
4743 }
4744
4745 /**
4746  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4747  * @hw:   pointer to the HW structure
4748  * @mem:  pointer to mem struct to free
4749  **/
4750 enum i40e_status_code
4751 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4752                      struct i40e_virt_mem *mem)
4753 {
4754         if (!mem)
4755                 return I40E_ERR_PARAM;
4756
4757         rte_free(mem->va);
4758         mem->va = NULL;
4759
4760         return I40E_SUCCESS;
4761 }
4762
4763 void
4764 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4765 {
4766         rte_spinlock_init(&sp->spinlock);
4767 }
4768
4769 void
4770 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4771 {
4772         rte_spinlock_lock(&sp->spinlock);
4773 }
4774
4775 void
4776 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4777 {
4778         rte_spinlock_unlock(&sp->spinlock);
4779 }
4780
4781 void
4782 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4783 {
4784         return;
4785 }
4786
4787 /**
4788  * Get the hardware capabilities, which will be parsed
4789  * and saved into struct i40e_hw.
4790  */
4791 static int
4792 i40e_get_cap(struct i40e_hw *hw)
4793 {
4794         struct i40e_aqc_list_capabilities_element_resp *buf;
4795         uint16_t len, size = 0;
4796         int ret;
4797
4798         /* Calculate a buffer large enough to hold the response data temporarily */
4799         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4800                                                 I40E_MAX_CAP_ELE_NUM;
4801         buf = rte_zmalloc("i40e", len, 0);
4802         if (!buf) {
4803                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4804                 return I40E_ERR_NO_MEMORY;
4805         }
4806
4807         /* Get and parse the capabilities, then save them to hw */
4808         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4809                         i40e_aqc_opc_list_func_capabilities, NULL);
4810         if (ret != I40E_SUCCESS)
4811                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4812
4813         /* Free the temporary buffer after being used */
4814         rte_free(buf);
4815
4816         return ret;
4817 }
4818
4819 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4820
4821 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4822                 const char *value,
4823                 void *opaque)
4824 {
4825         struct i40e_pf *pf;
4826         unsigned long num;
4827         char *end;
4828
4829         pf = (struct i40e_pf *)opaque;
4830         RTE_SET_USED(key);
4831
4832         errno = 0;
4833         num = strtoul(value, &end, 0);
4834         if (errno != 0 || end == value || *end != 0) {
4835                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s; "
4836                             "keeping the current value = %hu", value, pf->vf_nb_qp_max);
4837                 return -(EINVAL);
4838         }
4839
4840         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4841                 pf->vf_nb_qp_max = (uint16_t)num;
4842         else
4843                 /* return 0 so a later valid occurrence of the same argument still works */
4844                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4845                             "a power of 2 and no greater than 16, keeping the "
4846                             "current value = %hu", num, pf->vf_nb_qp_max);
4847
4848         return 0;
4849 }
4850
4851 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4852 {
4853         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4854         struct rte_kvargs *kvlist;
4855         int kvargs_count;
4856
4857         /* set default queue number per VF as 4 */
4858         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4859
4860         if (dev->device->devargs == NULL)
4861                 return 0;
4862
4863         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4864         if (kvlist == NULL)
4865                 return -(EINVAL);
4866
4867         kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4868         if (!kvargs_count) {
4869                 rte_kvargs_free(kvlist);
4870                 return 0;
4871         }
4872
4873         if (kvargs_count > 1)
4874                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
4875                             "the first invalid or the last valid one takes effect",
4876                             ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4877
4878         rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4879                            i40e_pf_parse_vf_queue_number_handler, pf);
4880
4881         rte_kvargs_free(kvlist);
4882
4883         return 0;
4884 }
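
/*
 * Illustrative sketch, not part of the driver: the devarg handled
 * above is normally supplied on the EAL command line, e.g.
 * "-a 0000:81:00.0,queue-num-per-vf=8" (device address hypothetical).
 * A minimal stand-alone kvargs round-trip of the same key:
 */
static int
vf_queue_devarg_sketch(struct i40e_pf *pf)
{
	struct rte_kvargs *kvlist =
		rte_kvargs_parse("queue-num-per-vf=8", valid_keys);
	int ret;

	if (kvlist == NULL)
		return -EINVAL;
	ret = rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
				 i40e_pf_parse_vf_queue_number_handler, pf);
	rte_kvargs_free(kvlist);
	return ret;
}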
4885
4886 static int
4887 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4888 {
4889         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4890         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4891         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4892         uint16_t qp_count = 0, vsi_count = 0;
4893
4894         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4895                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4896                 return -EINVAL;
4897         }
4898
4899         i40e_pf_config_vf_rxq_number(dev);
4900
4901         /* Initialize the parameters for link flow control (LFC) */
4902         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4903         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4904         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4905
4906         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4907         pf->max_num_vsi = hw->func_caps.num_vsis;
4908         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4909         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4910
4911         /* FDir queue/VSI allocation */
4912         pf->fdir_qp_offset = 0;
4913         if (hw->func_caps.fd) {
4914                 pf->flags |= I40E_FLAG_FDIR;
4915                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4916         } else {
4917                 pf->fdir_nb_qps = 0;
4918         }
4919         qp_count += pf->fdir_nb_qps;
4920         vsi_count += 1;
4921
4922         /* LAN queue/VSI allocation */
4923         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4924         if (!hw->func_caps.rss) {
4925                 pf->lan_nb_qps = 1;
4926         } else {
4927                 pf->flags |= I40E_FLAG_RSS;
4928                 if (hw->mac.type == I40E_MAC_X722)
4929                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4930                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4931         }
4932         qp_count += pf->lan_nb_qps;
4933         vsi_count += 1;
4934
4935         /* VF queue/VSI allocation */
4936         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4937         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4938                 pf->flags |= I40E_FLAG_SRIOV;
4939                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4940                 pf->vf_num = pci_dev->max_vfs;
4941                 PMD_DRV_LOG(DEBUG,
4942                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4943                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4944         } else {
4945                 pf->vf_nb_qps = 0;
4946                 pf->vf_num = 0;
4947         }
4948         qp_count += pf->vf_nb_qps * pf->vf_num;
4949         vsi_count += pf->vf_num;
4950
4951         /* VMDq queue/VSI allocation */
4952         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4953         pf->vmdq_nb_qps = 0;
4954         pf->max_nb_vmdq_vsi = 0;
4955         if (hw->func_caps.vmdq) {
4956                 if (qp_count < hw->func_caps.num_tx_qp &&
4957                         vsi_count < hw->func_caps.num_vsis) {
4958                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4959                                 qp_count) / pf->vmdq_nb_qp_max;
4960
4961                         /* Limit the maximum number of VMDq vsi to the maximum
4962                          * ethdev can support
4963                          */
4964                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4965                                 hw->func_caps.num_vsis - vsi_count);
4966                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4967                                 ETH_64_POOLS);
4968                         if (pf->max_nb_vmdq_vsi) {
4969                                 pf->flags |= I40E_FLAG_VMDQ;
4970                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4971                                 PMD_DRV_LOG(DEBUG,
4972                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4973                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4974                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4975                         } else {
4976                                 PMD_DRV_LOG(INFO,
4977                                         "Not enough queues left for VMDq");
4978                         }
4979                 } else {
4980                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4981                 }
4982         }
4983         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4984         vsi_count += pf->max_nb_vmdq_vsi;
4985
4986         if (hw->func_caps.dcb)
4987                 pf->flags |= I40E_FLAG_DCB;
4988
4989         if (qp_count > hw->func_caps.num_tx_qp) {
4990                 PMD_DRV_LOG(ERR,
4991                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4992                         qp_count, hw->func_caps.num_tx_qp);
4993                 return -EINVAL;
4994         }
4995         if (vsi_count > hw->func_caps.num_vsis) {
4996                 PMD_DRV_LOG(ERR,
4997                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4998                         vsi_count, hw->func_caps.num_vsis);
4999                 return -EINVAL;
5000         }
5001
5002         return 0;
5003 }
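
/*
 * Worked example of the budget above, with hypothetical build defaults:
 * FDIR enabled (1 QP, 1 VSI), 64 LAN QPs (1 VSI), 4 VFs x 4 QPs
 * (4 VSIs) and 4 VMDq VSIs x 4 QPs gives
 *     qp_count  = 1 + 64 + 16 + 16 = 97
 *     vsi_count = 1 + 1 + 4 + 4    = 10
 * and both totals must stay within func_caps.num_tx_qp and
 * func_caps.num_vsis respectively, or init fails with -EINVAL.
 */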
5004
5005 static int
5006 i40e_pf_get_switch_config(struct i40e_pf *pf)
5007 {
5008         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5009         struct i40e_aqc_get_switch_config_resp *switch_config;
5010         struct i40e_aqc_switch_config_element_resp *element;
5011         uint16_t start_seid = 0, num_reported;
5012         int ret;
5013
5014         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
5015                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
5016         if (!switch_config) {
5017                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5018                 return -ENOMEM;
5019         }
5020
5021         /* Get the switch configurations */
5022         ret = i40e_aq_get_switch_config(hw, switch_config,
5023                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
5024         if (ret != I40E_SUCCESS) {
5025                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
5026                 goto fail;
5027         }
5028         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
5029         if (num_reported != 1) { /* The number should be 1 */
5030                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
5031                 goto fail;
5032         }
5033
5034         /* Parse the switch configuration elements */
5035         element = &(switch_config->element[0]);
5036         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
5037                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
5038                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
5039         } else
5040                 PMD_DRV_LOG(INFO, "Unknown element type");
5041
5042 fail:
5043         rte_free(switch_config);
5044
5045         return ret;
5046 }
5047
5048 static int
5049 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
5050                         uint32_t num)
5051 {
5052         struct pool_entry *entry;
5053
5054         if (pool == NULL || num == 0)
5055                 return -EINVAL;
5056
5057         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
5058         if (entry == NULL) {
5059                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
5060                 return -ENOMEM;
5061         }
5062
5063         /* Initialize the queue heap */
5064         pool->num_free = num;
5065         pool->num_alloc = 0;
5066         pool->base = base;
5067         LIST_INIT(&pool->alloc_list);
5068         LIST_INIT(&pool->free_list);
5069
5070         /* Initialize the element */
5071         entry->base = 0;
5072         entry->len = num;
5073
5074         LIST_INSERT_HEAD(&pool->free_list, entry, next);
5075         return 0;
5076 }
5077
5078 static void
5079 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
5080 {
5081         struct pool_entry *entry, *next_entry;
5082
5083         if (pool == NULL)
5084                 return;
5085
5086         for (entry = LIST_FIRST(&pool->alloc_list);
5087                         entry && (next_entry = LIST_NEXT(entry, next), 1);
5088                         entry = next_entry) {
5089                 LIST_REMOVE(entry, next);
5090                 rte_free(entry);
5091         }
5092
5093         for (entry = LIST_FIRST(&pool->free_list);
5094                         entry && (next_entry = LIST_NEXT(entry, next), 1);
5095                         entry = next_entry) {
5096                 LIST_REMOVE(entry, next);
5097                 rte_free(entry);
5098         }
5099
5100         pool->num_free = 0;
5101         pool->num_alloc = 0;
5102         pool->base = 0;
5103         LIST_INIT(&pool->alloc_list);
5104         LIST_INIT(&pool->free_list);
5105 }
5106
5107 static int
5108 i40e_res_pool_free(struct i40e_res_pool_info *pool,
5109                        uint32_t base)
5110 {
5111         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
5112         uint32_t pool_offset;
5113         uint16_t len;
5114         int insert;
5115
5116         if (pool == NULL) {
5117                 PMD_DRV_LOG(ERR, "Invalid parameter");
5118                 return -EINVAL;
5119         }
5120
5121         pool_offset = base - pool->base;
5122         /* Lookup in alloc list */
5123         LIST_FOREACH(entry, &pool->alloc_list, next) {
5124                 if (entry->base == pool_offset) {
5125                         valid_entry = entry;
5126                         LIST_REMOVE(entry, next);
5127                         break;
5128                 }
5129         }
5130
5131         /* Not found, return */
5132         if (valid_entry == NULL) {
5133                 PMD_DRV_LOG(ERR, "Failed to find entry");
5134                 return -EINVAL;
5135         }
5136
5137         /**
5138          * Found it; move it to the free list and try to merge.
5139          * To make merging easier, the free list is always kept sorted by base.
5140          * Find the adjacent prev and next entries.
5141          */
5142         prev = next = NULL;
5143         LIST_FOREACH(entry, &pool->free_list, next) {
5144                 if (entry->base > valid_entry->base) {
5145                         next = entry;
5146                         break;
5147                 }
5148                 prev = entry;
5149         }
5150
5151         insert = 0;
5152         len = valid_entry->len;
5153         /* Try to merge with the next one */
5154         if (next != NULL) {
5155                 /* Merge with next one */
5156                 if (valid_entry->base + len == next->base) {
5157                         next->base = valid_entry->base;
5158                         next->len += len;
5159                         rte_free(valid_entry);
5160                         valid_entry = next;
5161                         insert = 1;
5162                 }
5163         }
5164
5165         if (prev != NULL) {
5166                 /* Merge with previous one */
5167                 if (prev->base + prev->len == valid_entry->base) {
5168                         prev->len += valid_entry->len;
5169                         /* If it merged with the next one, remove the next node */
5170                         if (insert == 1) {
5171                                 LIST_REMOVE(valid_entry, next);
5172                                 rte_free(valid_entry);
5173                                 valid_entry = NULL;
5174                         } else {
5175                                 rte_free(valid_entry);
5176                                 valid_entry = NULL;
5177                                 insert = 1;
5178                         }
5179                 }
5180         }
5181
5182         /* No adjacent entry to merge with, insert */
5183         if (insert == 0) {
5184                 if (prev != NULL)
5185                         LIST_INSERT_AFTER(prev, valid_entry, next);
5186                 else if (next != NULL)
5187                         LIST_INSERT_BEFORE(next, valid_entry, next);
5188                 else /* It's empty list, insert to head */
5189                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5190         }
5191
5192         pool->num_free += len;
5193         pool->num_alloc -= len;
5194
5195         return 0;
5196 }
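/*
 * Editorial worked example for the merge logic above (not part of the
 * driver): with free_list entries [base 0, len 16] and [base 32, len 16],
 * freeing the block at base 16 (len 16) first merges it into the next
 * entry (giving [base 16, len 32]), then the previous entry absorbs that,
 * leaving the single free entry [base 0, len 48].
 */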
5197
5198 static int
5199 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5200                        uint16_t num)
5201 {
5202         struct pool_entry *entry, *valid_entry;
5203
5204         if (pool == NULL || num == 0) {
5205                 PMD_DRV_LOG(ERR, "Invalid parameter");
5206                 return -EINVAL;
5207         }
5208
5209         if (pool->num_free < num) {
5210                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5211                             num, pool->num_free);
5212                 return -ENOMEM;
5213         }
5214
5215         valid_entry = NULL;
5216         /* Look up the free list and find the best-fit entry */
5217         LIST_FOREACH(entry, &pool->free_list, next) {
5218                 if (entry->len >= num) {
5219                         /* Find best one */
5220                         if (entry->len == num) {
5221                                 valid_entry = entry;
5222                                 break;
5223                         }
5224                         if (valid_entry == NULL || valid_entry->len > entry->len)
5225                                 valid_entry = entry;
5226                 }
5227         }
5228
5229         /* No entry found to satisfy the request, return */
5230         if (valid_entry == NULL) {
5231                 PMD_DRV_LOG(ERR, "No valid entry found");
5232                 return -ENOMEM;
5233         }
5234         /**
5235          * The entry has exactly the requested number of queues;
5236          * remove it from the free_list.
5237          */
5238         if (valid_entry->len == num) {
5239                 LIST_REMOVE(valid_entry, next);
5240         } else {
5241                 /**
5242                  * The entry has more queues than requested;
5243                  * create a new entry for the alloc_list and shrink the
5244                  * base and length of the one left in the free_list.
5245                  */
5246                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5247                 if (entry == NULL) {
5248                         PMD_DRV_LOG(ERR,
5249                                 "Failed to allocate memory for resource pool");
5250                         return -ENOMEM;
5251                 }
5252                 entry->base = valid_entry->base;
5253                 entry->len = num;
5254                 valid_entry->base += num;
5255                 valid_entry->len -= num;
5256                 valid_entry = entry;
5257         }
5258
5259         /* Insert it into alloc list, not sorted */
5260         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5261
5262         pool->num_free -= valid_entry->len;
5263         pool->num_alloc += valid_entry->len;
5264
5265         return valid_entry->base + pool->base;
5266 }
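/*
 * Editorial usage sketch (not part of the driver): an alloc/free round
 * trip. i40e_res_pool_alloc() returns the absolute base on success (a
 * best-fit search over free_list) or a negative errno on failure.
 *
 *   int base = i40e_res_pool_alloc(&qp_pool, 4);
 *   if (base >= 0)
 *       i40e_res_pool_free(&qp_pool, base);  // merges back into free_list
 */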
5267
5268 /**
5269  * bitmap_is_subset - Check whether src2 is a subset of src1
5270  **/
5271 static inline int
5272 bitmap_is_subset(uint8_t src1, uint8_t src2)
5273 {
5274         return !((src1 ^ src2) & src2);
5275 }
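/*
 * Example (editorial): bitmap_is_subset(0x0F, 0x05) == 1, since every bit
 * of src2 is set in src1; bitmap_is_subset(0x0F, 0x10) == 0.
 */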
5276
5277 static enum i40e_status_code
5278 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5279 {
5280         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5281
5282         /* If DCB is not supported, only default TC is supported */
5283         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5284                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5285                 return I40E_NOT_SUPPORTED;
5286         }
5287
5288         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5289                 PMD_DRV_LOG(ERR,
5290                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
5291                         enabled_tcmap, hw->func_caps.enabled_tcmap);
5292                 return I40E_NOT_SUPPORTED;
5293         }
5294         return I40E_SUCCESS;
5295 }
5296
5297 int
5298 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5299                                 struct i40e_vsi_vlan_pvid_info *info)
5300 {
5301         struct i40e_hw *hw;
5302         struct i40e_vsi_context ctxt;
5303         uint8_t vlan_flags = 0;
5304         int ret;
5305
5306         if (vsi == NULL || info == NULL) {
5307                 PMD_DRV_LOG(ERR, "invalid parameters");
5308                 return I40E_ERR_PARAM;
5309         }
5310
5311         if (info->on) {
5312                 vsi->info.pvid = info->config.pvid;
5313                 /**
5314                  * If PVID insertion is enabled, only tagged packets
5315                  * are allowed to be sent out.
5316                  */
5317                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5318                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5319         } else {
5320                 vsi->info.pvid = 0;
5321                 if (info->config.reject.tagged == 0)
5322                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5323
5324                 if (info->config.reject.untagged == 0)
5325                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5326         }
5327         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5328                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
5329         vsi->info.port_vlan_flags |= vlan_flags;
5330         vsi->info.valid_sections =
5331                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5332         memset(&ctxt, 0, sizeof(ctxt));
5333         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5334         ctxt.seid = vsi->seid;
5335
5336         hw = I40E_VSI_TO_HW(vsi);
5337         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5338         if (ret != I40E_SUCCESS)
5339                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5340
5341         return ret;
5342 }
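/*
 * Editorial usage sketch (not part of the driver): enabling PVID insertion
 * on a VSI, using the fields referenced in the function above. The PVID
 * value 100 is an arbitrary example.
 *
 *   struct i40e_vsi_vlan_pvid_info info = { 0 };
 *   info.on = 1;
 *   info.config.pvid = 100;  // only tagged packets may then be sent out
 *   ret = i40e_vsi_vlan_pvid_set(vsi, &info);
 */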
5343
5344 static int
5345 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5346 {
5347         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5348         int i, ret;
5349         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5350
5351         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5352         if (ret != I40E_SUCCESS)
5353                 return ret;
5354
5355         if (!vsi->seid) {
5356                 PMD_DRV_LOG(ERR, "seid not valid");
5357                 return -EINVAL;
5358         }
5359
5360         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5361         tc_bw_data.tc_valid_bits = enabled_tcmap;
5362         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5363                 tc_bw_data.tc_bw_credits[i] =
5364                         (enabled_tcmap & (1 << i)) ? 1 : 0;
5365
5366         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5367         if (ret != I40E_SUCCESS) {
5368                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5369                 return ret;
5370         }
5371
5372         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5373                                         sizeof(vsi->info.qs_handle));
5374         return I40E_SUCCESS;
5375 }
5376
5377 static enum i40e_status_code
5378 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5379                                  struct i40e_aqc_vsi_properties_data *info,
5380                                  uint8_t enabled_tcmap)
5381 {
5382         enum i40e_status_code ret;
5383         int i, total_tc = 0;
5384         uint16_t qpnum_per_tc, bsf, qp_idx;
5385
5386         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5387         if (ret != I40E_SUCCESS)
5388                 return ret;
5389
5390         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5391                 if (enabled_tcmap & (1 << i))
5392                         total_tc++;
5393         if (total_tc == 0)
5394                 total_tc = 1;
5395         vsi->enabled_tc = enabled_tcmap;
5396
5397         /* Number of queues per enabled TC */
5398         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5399         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5400         bsf = rte_bsf32(qpnum_per_tc);
5401
5402         /* Adjust the queue number to actual queues that can be applied */
5403         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5404                 vsi->nb_qps = qpnum_per_tc * total_tc;
5405
5406         /**
5407          * Configure TC and queue mapping parameters: for each enabled TC,
5408          * allocate qpnum_per_tc queues to its traffic. Disabled TCs are
5409          * served by the default queue.
5410          */
5411         qp_idx = 0;
5412         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5413                 if (vsi->enabled_tc & (1 << i)) {
5414                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5415                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5416                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5417                         qp_idx += qpnum_per_tc;
5418                 } else
5419                         info->tc_mapping[i] = 0;
5420         }
5421
5422         /* Associate queue number with VSI */
5423         if (vsi->type == I40E_VSI_SRIOV) {
5424                 info->mapping_flags |=
5425                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5426                 for (i = 0; i < vsi->nb_qps; i++)
5427                         info->queue_mapping[i] =
5428                                 rte_cpu_to_le_16(vsi->base_queue + i);
5429         } else {
5430                 info->mapping_flags |=
5431                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5432                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5433         }
5434         info->valid_sections |=
5435                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5436
5437         return I40E_SUCCESS;
5438 }
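/*
 * Editorial worked example (not part of the driver): with nb_qps = 8 and
 * enabled_tcmap = 0x3, total_tc = 2, qpnum_per_tc = 4 and
 * bsf = rte_bsf32(4) = 2. TC0 then gets
 * tc_mapping = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *              (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT),
 * and TC1 gets queue offset 4 with the same size exponent (2^2 = 4 queues).
 */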
5439
5440 static int
5441 i40e_veb_release(struct i40e_veb *veb)
5442 {
5443         struct i40e_vsi *vsi;
5444         struct i40e_hw *hw;
5445
5446         if (veb == NULL)
5447                 return -EINVAL;
5448
5449         if (!TAILQ_EMPTY(&veb->head)) {
5450                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5451                 return -EACCES;
5452         }
5453         /* associate_vsi field is NULL for floating VEB */
5454         if (veb->associate_vsi != NULL) {
5455                 vsi = veb->associate_vsi;
5456                 hw = I40E_VSI_TO_HW(vsi);
5457
5458                 vsi->uplink_seid = veb->uplink_seid;
5459                 vsi->veb = NULL;
5460         } else {
5461                 veb->associate_pf->main_vsi->floating_veb = NULL;
5462                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5463         }
5464
5465         i40e_aq_delete_element(hw, veb->seid, NULL);
5466         rte_free(veb);
5467         return I40E_SUCCESS;
5468 }
5469
5470 /* Setup a veb */
5471 static struct i40e_veb *
5472 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5473 {
5474         struct i40e_veb *veb;
5475         int ret;
5476         struct i40e_hw *hw;
5477
5478         if (pf == NULL) {
5479                 PMD_DRV_LOG(ERR,
5480                             "veb setup failed, associated PF shouldn't be NULL");
5481                 return NULL;
5482         }
5483         hw = I40E_PF_TO_HW(pf);
5484
5485         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5486         if (!veb) {
5487                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5488                 goto fail;
5489         }
5490
5491         veb->associate_vsi = vsi;
5492         veb->associate_pf = pf;
5493         TAILQ_INIT(&veb->head);
5494         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5495
5496         /* create floating veb if vsi is NULL */
5497         if (vsi != NULL) {
5498                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5499                                       I40E_DEFAULT_TCMAP, false,
5500                                       &veb->seid, false, NULL);
5501         } else {
5502                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5503                                       true, &veb->seid, false, NULL);
5504         }
5505
5506         if (ret != I40E_SUCCESS) {
5507                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5508                             hw->aq.asq_last_status);
5509                 goto fail;
5510         }
5511         veb->enabled_tc = I40E_DEFAULT_TCMAP;
5512
5513         /* get statistics index */
5514         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5515                                 &veb->stats_idx, NULL, NULL, NULL);
5516         if (ret != I40E_SUCCESS) {
5517                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5518                             hw->aq.asq_last_status);
5519                 goto fail;
5520         }
5521         /* Get VEB bandwidth, to be implemented */
5522         /* The associated VSI now binds to the VEB; set its uplink to this VEB */
5523         if (vsi)
5524                 vsi->uplink_seid = veb->seid;
5525
5526         return veb;
5527 fail:
5528         rte_free(veb);
5529         return NULL;
5530 }
5531
5532 int
5533 i40e_vsi_release(struct i40e_vsi *vsi)
5534 {
5535         struct i40e_pf *pf;
5536         struct i40e_hw *hw;
5537         struct i40e_vsi_list *vsi_list;
5538         void *temp;
5539         int ret;
5540         struct i40e_mac_filter *f;
5541         uint16_t user_param;
5542
5543         if (!vsi)
5544                 return I40E_SUCCESS;
5545
5546         if (!vsi->adapter)
5547                 return -EFAULT;
5548
5549         user_param = vsi->user_param;
5550
5551         pf = I40E_VSI_TO_PF(vsi);
5552         hw = I40E_VSI_TO_HW(vsi);
5553
5554         /* VSI has children attached, release the children first */
5555         if (vsi->veb) {
5556                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5557                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5558                                 return -1;
5559                 }
5560                 i40e_veb_release(vsi->veb);
5561         }
5562
5563         if (vsi->floating_veb) {
5564                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5565                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5566                                 return -1;
5567                 }
5568         }
5569
5570         /* Remove all macvlan filters of the VSI */
5571         i40e_vsi_remove_all_macvlan_filter(vsi);
5572         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5573                 rte_free(f);
5574
5575         if (vsi->type != I40E_VSI_MAIN &&
5576             ((vsi->type != I40E_VSI_SRIOV) ||
5577             !pf->floating_veb_list[user_param])) {
5578                 /* Remove vsi from parent's sibling list */
5579                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5580                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
5581                         return I40E_ERR_PARAM;
5582                 }
5583                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5584                                 &vsi->sib_vsi_list, list);
5585
5586                 /* Remove all switch elements of the VSI */
5587                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5588                 if (ret != I40E_SUCCESS)
5589                         PMD_DRV_LOG(ERR, "Failed to delete element");
5590         }
5591
5592         if ((vsi->type == I40E_VSI_SRIOV) &&
5593             pf->floating_veb_list[user_param]) {
5594                 /* Remove vsi from parent's sibling list */
5595                 if (vsi->parent_vsi == NULL ||
5596                     vsi->parent_vsi->floating_veb == NULL) {
5597                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
5598                         return I40E_ERR_PARAM;
5599                 }
5600                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5601                              &vsi->sib_vsi_list, list);
5602
5603                 /* Remove all switch elements of the VSI */
5604                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5605                 if (ret != I40E_SUCCESS)
5606                         PMD_DRV_LOG(ERR, "Failed to delete element");
5607         }
5608
5609         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5610
5611         if (vsi->type != I40E_VSI_SRIOV)
5612                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5613         rte_free(vsi);
5614
5615         return I40E_SUCCESS;
5616 }
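/*
 * Editorial note (not part of the driver): release order matters above.
 * Child VSIs hanging off vsi->veb and vsi->floating_veb are released
 * recursively before the VEB and the VSI itself, so the switch tree is
 * torn down leaves-first.
 */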
5617
5618 static int
5619 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5620 {
5621         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5622         struct i40e_aqc_remove_macvlan_element_data def_filter;
5623         struct i40e_mac_filter_info filter;
5624         int ret;
5625
5626         if (vsi->type != I40E_VSI_MAIN)
5627                 return I40E_ERR_CONFIG;
5628         memset(&def_filter, 0, sizeof(def_filter));
5629         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5630                                         ETH_ADDR_LEN);
5631         def_filter.vlan_tag = 0;
5632         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5633                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5634         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5635         if (ret != I40E_SUCCESS) {
5636                 struct i40e_mac_filter *f;
5637                 struct rte_ether_addr *mac;
5638
5639                 PMD_DRV_LOG(DEBUG,
5640                             "Cannot remove the default macvlan filter");
5641                 /* Need to add the permanent MAC to the MAC list */
5642                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5643                 if (f == NULL) {
5644                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5645                         return I40E_ERR_NO_MEMORY;
5646                 }
5647                 mac = &f->mac_info.mac_addr;
5648                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5649                                 ETH_ADDR_LEN);
5650                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5651                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5652                 vsi->mac_num++;
5653
5654                 return ret;
5655         }
5656         rte_memcpy(&filter.mac_addr,
5657                 (struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5658         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5659         return i40e_vsi_add_mac(vsi, &filter);
5660 }
5661
5662 /*
5663  * i40e_vsi_get_bw_config - Query VSI BW Information
5664  * @vsi: the VSI to be queried
5665  *
5666  * Returns 0 on success, negative value on failure
5667  */
5668 static enum i40e_status_code
5669 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5670 {
5671         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5672         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5673         struct i40e_hw *hw = &vsi->adapter->hw;
5674         i40e_status ret;
5675         int i;
5676         uint32_t bw_max;
5677
5678         memset(&bw_config, 0, sizeof(bw_config));
5679         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5680         if (ret != I40E_SUCCESS) {
5681                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5682                             hw->aq.asq_last_status);
5683                 return ret;
5684         }
5685
5686         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5687         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5688                                         &ets_sla_config, NULL);
5689         if (ret != I40E_SUCCESS) {
5690                 PMD_DRV_LOG(ERR,
5691                         "VSI failed to get TC bandwidth configuration %u",
5692                         hw->aq.asq_last_status);
5693                 return ret;
5694         }
5695
5696         /* store and print out BW info */
5697         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5698         vsi->bw_info.bw_max = bw_config.max_bw;
5699         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5700         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5701         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5702                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5703                      I40E_16_BIT_WIDTH);
5704         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5705                 vsi->bw_info.bw_ets_share_credits[i] =
5706                                 ets_sla_config.share_credits[i];
5707                 vsi->bw_info.bw_ets_credits[i] =
5708                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5709                 /* 4 bits per TC, 4th bit is reserved */
5710                 vsi->bw_info.bw_ets_max[i] =
5711                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5712                                   RTE_LEN2MASK(3, uint8_t));
5713                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5714                             vsi->bw_info.bw_ets_share_credits[i]);
5715                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5716                             vsi->bw_info.bw_ets_credits[i]);
5717                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5718                             vsi->bw_info.bw_ets_max[i]);
5719         }
5720
5721         return I40E_SUCCESS;
5722 }
5723
5724 /* i40e_enable_pf_lb
5725  * @pf: pointer to the pf structure
5726  *
5727  * allow loopback on pf
5728  */
5729 static inline void
5730 i40e_enable_pf_lb(struct i40e_pf *pf)
5731 {
5732         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5733         struct i40e_vsi_context ctxt;
5734         int ret;
5735
5736         /* Use the FW API if FW >= v5.0 */
5737         if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5738                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5739                 return;
5740         }
5741
5742         memset(&ctxt, 0, sizeof(ctxt));
5743         ctxt.seid = pf->main_vsi_seid;
5744         ctxt.pf_num = hw->pf_id;
5745         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5746         if (ret) {
5747                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5748                             ret, hw->aq.asq_last_status);
5749                 return;
5750         }
5751         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5752         ctxt.info.valid_sections =
5753                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5754         ctxt.info.switch_id |=
5755                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5756
5757         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5758         if (ret)
5759                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5760                             hw->aq.asq_last_status);
5761 }
5762
5763 /* Setup a VSI */
5764 struct i40e_vsi *
5765 i40e_vsi_setup(struct i40e_pf *pf,
5766                enum i40e_vsi_type type,
5767                struct i40e_vsi *uplink_vsi,
5768                uint16_t user_param)
5769 {
5770         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5771         struct i40e_vsi *vsi;
5772         struct i40e_mac_filter_info filter;
5773         int ret;
5774         struct i40e_vsi_context ctxt;
5775         struct rte_ether_addr broadcast =
5776                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5777
5778         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5779             uplink_vsi == NULL) {
5780                 PMD_DRV_LOG(ERR,
5781                         "VSI setup failed, VSI link shouldn't be NULL");
5782                 return NULL;
5783         }
5784
5785         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5786                 PMD_DRV_LOG(ERR,
5787                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5788                 return NULL;
5789         }
5790
5791         /* Two situations:
5792          * 1. type is not MAIN and the uplink VSI is not NULL:
5793          *    if the uplink VSI hasn't set up a VEB, create one first (veb field).
5794          * 2. type is SRIOV and the uplink is NULL:
5795          *    if the floating VEB is NULL, create one (floating_veb field).
5796          */
5797
5798         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5799             uplink_vsi->veb == NULL) {
5800                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5801
5802                 if (uplink_vsi->veb == NULL) {
5803                         PMD_DRV_LOG(ERR, "VEB setup failed");
5804                         return NULL;
5805                 }
5806                 /* set ALLOWLOOPBACK on pf, when veb is created */
5807                 i40e_enable_pf_lb(pf);
5808         }
5809
5810         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5811             pf->main_vsi->floating_veb == NULL) {
5812                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5813
5814                 if (pf->main_vsi->floating_veb == NULL) {
5815                         PMD_DRV_LOG(ERR, "VEB setup failed");
5816                         return NULL;
5817                 }
5818         }
5819
5820         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5821         if (!vsi) {
5822                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5823                 return NULL;
5824         }
5825         TAILQ_INIT(&vsi->mac_list);
5826         vsi->type = type;
5827         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5828         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5829         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5830         vsi->user_param = user_param;
5831         vsi->vlan_anti_spoof_on = 0;
5832         vsi->vlan_filter_on = 0;
5833         /* Allocate queues */
5834         switch (vsi->type) {
5835         case I40E_VSI_MAIN  :
5836                 vsi->nb_qps = pf->lan_nb_qps;
5837                 break;
5838         case I40E_VSI_SRIOV :
5839                 vsi->nb_qps = pf->vf_nb_qps;
5840                 break;
5841         case I40E_VSI_VMDQ2:
5842                 vsi->nb_qps = pf->vmdq_nb_qps;
5843                 break;
5844         case I40E_VSI_FDIR:
5845                 vsi->nb_qps = pf->fdir_nb_qps;
5846                 break;
5847         default:
5848                 goto fail_mem;
5849         }
5850         /*
5851          * The filter status descriptor is reported on RX queue 0,
5852          * while the TX queue for FDIR filter programming has no
5853          * such constraint and can be any queue.
5854          * To simplify things, the FDIR VSI uses queue pair 0.
5855          * To make sure it gets queue pair 0, queue allocation
5856          * must be done before this function is called.
5857          */
5858         if (type != I40E_VSI_FDIR) {
5859                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5860                 if (ret < 0) {
5861                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5862                                         vsi->seid, ret);
5863                         goto fail_mem;
5864                 }
5865                 vsi->base_queue = ret;
5866         } else
5867                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5868
5869         /* VF has MSIX interrupt in VF range, don't allocate here */
5870         if (type == I40E_VSI_MAIN) {
5871                 if (pf->support_multi_driver) {
5872                         /* If multi-driver is supported, INT0 must be used
5873                          * instead of allocating from the MSI-X pool, which is
5874                          * initialized from INT1; so it's OK to just set msix_intr
5875                          * to 0 and nb_msix to 1 without calling i40e_res_pool_alloc.
5876                          */
5877                         vsi->msix_intr = 0;
5878                         vsi->nb_msix = 1;
5879                 } else {
5880                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5881                                                   RTE_MIN(vsi->nb_qps,
5882                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5883                         if (ret < 0) {
5884                                 PMD_DRV_LOG(ERR,
5885                                             "VSI MAIN %d get heap failed %d",
5886                                             vsi->seid, ret);
5887                                 goto fail_queue_alloc;
5888                         }
5889                         vsi->msix_intr = ret;
5890                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5891                                                RTE_MAX_RXTX_INTR_VEC_ID);
5892                 }
5893         } else if (type != I40E_VSI_SRIOV) {
5894                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5895                 if (ret < 0) {
5896                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5897                         if (type != I40E_VSI_FDIR)
5898                                 goto fail_queue_alloc;
5899                         vsi->msix_intr = 0;
5900                         vsi->nb_msix = 0;
5901                 } else {
5902                         vsi->msix_intr = ret;
5903                         vsi->nb_msix = 1;
5904                 }
5905         } else {
5906                 vsi->msix_intr = 0;
5907                 vsi->nb_msix = 0;
5908         }
5909
5910         /* Add VSI */
5911         if (type == I40E_VSI_MAIN) {
5912                 /* For the main VSI, no need to add since it's the default one */
5913                 vsi->uplink_seid = pf->mac_seid;
5914                 vsi->seid = pf->main_vsi_seid;
5915                 /* Bind queues with specific MSIX interrupt */
5916                 /**
5917                  * Needs at least 2 interrupts: one for the misc cause, which
5918                  * is enabled from the OS side, and another for the queues,
5919                  * bound to the interrupt from the device side only.
5920                  */
5921
5922                 /* Get default VSI parameters from hardware */
5923                 memset(&ctxt, 0, sizeof(ctxt));
5924                 ctxt.seid = vsi->seid;
5925                 ctxt.pf_num = hw->pf_id;
5926                 ctxt.uplink_seid = vsi->uplink_seid;
5927                 ctxt.vf_num = 0;
5928                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5929                 if (ret != I40E_SUCCESS) {
5930                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5931                         goto fail_msix_alloc;
5932                 }
5933                 rte_memcpy(&vsi->info, &ctxt.info,
5934                         sizeof(struct i40e_aqc_vsi_properties_data));
5935                 vsi->vsi_id = ctxt.vsi_number;
5936                 vsi->info.valid_sections = 0;
5937
5938                 /* Configure TC; enable TC0 only */
5939                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5940                         I40E_SUCCESS) {
5941                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5942                         goto fail_msix_alloc;
5943                 }
5944
5945                 /* TC, queue mapping */
5946                 memset(&ctxt, 0, sizeof(ctxt));
5947                 vsi->info.valid_sections |=
5948                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5949                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5950                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5951                 rte_memcpy(&ctxt.info, &vsi->info,
5952                         sizeof(struct i40e_aqc_vsi_properties_data));
5953                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5954                                                 I40E_DEFAULT_TCMAP);
5955                 if (ret != I40E_SUCCESS) {
5956                         PMD_DRV_LOG(ERR,
5957                                 "Failed to configure TC queue mapping");
5958                         goto fail_msix_alloc;
5959                 }
5960                 ctxt.seid = vsi->seid;
5961                 ctxt.pf_num = hw->pf_id;
5962                 ctxt.uplink_seid = vsi->uplink_seid;
5963                 ctxt.vf_num = 0;
5964
5965                 /* Update VSI parameters */
5966                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5967                 if (ret != I40E_SUCCESS) {
5968                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5969                         goto fail_msix_alloc;
5970                 }
5971
5972                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5973                                                 sizeof(vsi->info.tc_mapping));
5974                 rte_memcpy(&vsi->info.queue_mapping,
5975                                 &ctxt.info.queue_mapping,
5976                         sizeof(vsi->info.queue_mapping));
5977                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5978                 vsi->info.valid_sections = 0;
5979
5980                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5981                                 ETH_ADDR_LEN);
5982
5983                 /**
5984                  * Updating the default filter settings is necessary to prevent
5985                  * reception of tagged packets.
5986                  * Some old firmware configurations load a default macvlan
5987                  * filter which accepts both tagged and untagged packets.
5988                  * The update replaces it with a normal filter if needed.
5989                  * For NVM 4.2.2 or later, the update is not needed anymore:
5990                  * firmware with a correct configuration loads the default
5991                  * macvlan filter, which is expected and cannot be removed.
5992                  */
5993                 i40e_update_default_filter_setting(vsi);
5994                 i40e_config_qinq(hw, vsi);
5995         } else if (type == I40E_VSI_SRIOV) {
5996                 memset(&ctxt, 0, sizeof(ctxt));
5997                 /**
5998                  * For other VSIs, the uplink_seid equals the uplink VSI's
5999                  * uplink_seid since they share the same VEB.
6000                  */
6001                 if (uplink_vsi == NULL)
6002                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
6003                 else
6004                         vsi->uplink_seid = uplink_vsi->uplink_seid;
6005                 ctxt.pf_num = hw->pf_id;
6006                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
6007                 ctxt.uplink_seid = vsi->uplink_seid;
6008                 ctxt.connection_type = 0x1;
6009                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6010
6011                 /* Use the VEB configuration if FW >= v5.0 */
6012                 if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
6013                         /* Configure switch ID */
6014                         ctxt.info.valid_sections |=
6015                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6016                         ctxt.info.switch_id =
6017                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6018                 }
6019
6020                 /* Configure port/vlan */
6021                 ctxt.info.valid_sections |=
6022                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6023                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6024                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6025                                                 hw->func_caps.enabled_tcmap);
6026                 if (ret != I40E_SUCCESS) {
6027                         PMD_DRV_LOG(ERR,
6028                                 "Failed to configure TC queue mapping");
6029                         goto fail_msix_alloc;
6030                 }
6031
6032                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
6033                 ctxt.info.valid_sections |=
6034                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6035                 /**
6036                  * Since the VSI is not created yet, only configure the
6037                  * parameters; the VSI is added below.
6038                  */
6039
6040                 i40e_config_qinq(hw, vsi);
6041         } else if (type == I40E_VSI_VMDQ2) {
6042                 memset(&ctxt, 0, sizeof(ctxt));
6043                 /*
6044                  * For other VSIs, the uplink_seid equals the uplink VSI's
6045                  * uplink_seid since they share the same VEB.
6046                  */
6047                 vsi->uplink_seid = uplink_vsi->uplink_seid;
6048                 ctxt.pf_num = hw->pf_id;
6049                 ctxt.vf_num = 0;
6050                 ctxt.uplink_seid = vsi->uplink_seid;
6051                 ctxt.connection_type = 0x1;
6052                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6053
6054                 ctxt.info.valid_sections |=
6055                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6056                 /* user_param carries a flag to enable loopback */
6057                 if (user_param) {
6058                         ctxt.info.switch_id =
6059                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6060                         ctxt.info.switch_id |=
6061                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6062                 }
6063
6064                 /* Configure port/vlan */
6065                 ctxt.info.valid_sections |=
6066                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6067                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6068                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6069                                                 I40E_DEFAULT_TCMAP);
6070                 if (ret != I40E_SUCCESS) {
6071                         PMD_DRV_LOG(ERR,
6072                                 "Failed to configure TC queue mapping");
6073                         goto fail_msix_alloc;
6074                 }
6075                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6076                 ctxt.info.valid_sections |=
6077                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6078         } else if (type == I40E_VSI_FDIR) {
6079                 memset(&ctxt, 0, sizeof(ctxt));
6080                 vsi->uplink_seid = uplink_vsi->uplink_seid;
6081                 ctxt.pf_num = hw->pf_id;
6082                 ctxt.vf_num = 0;
6083                 ctxt.uplink_seid = vsi->uplink_seid;
6084                 ctxt.connection_type = 0x1;     /* regular data port */
6085                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6086                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
6087                                                 I40E_DEFAULT_TCMAP);
6088                 if (ret != I40E_SUCCESS) {
6089                         PMD_DRV_LOG(ERR,
6090                                 "Failed to configure TC queue mapping.");
6091                         goto fail_msix_alloc;
6092                 }
6093                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
6094                 ctxt.info.valid_sections |=
6095                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
6096         } else {
6097                 PMD_DRV_LOG(ERR, "VSI: Other VSI types are not supported yet");
6098                 goto fail_msix_alloc;
6099         }
6100
6101         if (vsi->type != I40E_VSI_MAIN) {
6102                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6103                 if (ret != I40E_SUCCESS) {
6104                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
6105                                     hw->aq.asq_last_status);
6106                         goto fail_msix_alloc;
6107                 }
6108                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6109                 vsi->info.valid_sections = 0;
6110                 vsi->seid = ctxt.seid;
6111                 vsi->vsi_id = ctxt.vsi_number;
6112                 vsi->sib_vsi_list.vsi = vsi;
6113                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
6114                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
6115                                           &vsi->sib_vsi_list, list);
6116                 } else {
6117                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
6118                                           &vsi->sib_vsi_list, list);
6119                 }
6120         }
6121
6122         /* MAC/VLAN configuration */
6123         rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
6124         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
6125
6126         ret = i40e_vsi_add_mac(vsi, &filter);
6127         if (ret != I40E_SUCCESS) {
6128                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
6129                 goto fail_msix_alloc;
6130         }
6131
6132         /* Get VSI BW information */
6133         i40e_vsi_get_bw_config(vsi);
6134         return vsi;
6135 fail_msix_alloc:
6136         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
6137 fail_queue_alloc:
6138         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
6139 fail_mem:
6140         rte_free(vsi);
6141         return NULL;
6142 }
6143
6144 /* Configure vlan filter on or off */
6145 int
6146 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
6147 {
6148         int i, num;
6149         struct i40e_mac_filter *f;
6150         void *temp;
6151         struct i40e_mac_filter_info *mac_filter;
6152         enum rte_mac_filter_type desired_filter;
6153         int ret = I40E_SUCCESS;
6154
6155         if (on) {
6156                 /* Filter to match MAC and VLAN */
6157                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
6158         } else {
6159                 /* Filter to match only MAC */
6160                 desired_filter = RTE_MAC_PERFECT_MATCH;
6161         }
6162
6163         num = vsi->mac_num;
6164
6165         mac_filter = rte_zmalloc("mac_filter_info_data",
6166                                  num * sizeof(*mac_filter), 0);
6167         if (mac_filter == NULL) {
6168                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6169                 return I40E_ERR_NO_MEMORY;
6170         }
6171
6172         i = 0;
6173
6174         /* Remove all existing MAC filters */
6175         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6176                 mac_filter[i] = f->mac_info;
6177                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6178                 if (ret) {
6179                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6180                                     on ? "enable" : "disable");
6181                         goto DONE;
6182                 }
6183                 i++;
6184         }
6185
6186         /* Re-add all MACs with the desired filter type */
6187         for (i = 0; i < num; i++) {
6188                 mac_filter[i].filter_type = desired_filter;
6189                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6190                 if (ret) {
6191                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6192                                     on ? "enable" : "disable");
6193                         goto DONE;
6194                 }
6195         }
6196
6197 DONE:
6198         rte_free(mac_filter);
6199         return ret;
6200 }
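/*
 * Editorial usage note (not part of the driver): the toggle above is
 * delete-then-re-add; e.g. i40e_vsi_config_vlan_filter(vsi, true) removes
 * every MAC and re-adds it as RTE_MACVLAN_PERFECT_MATCH, so a mid-way
 * failure leaves only the filters already re-added.
 */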
6201
6202 /* Configure vlan stripping on or off */
6203 int
6204 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6205 {
6206         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6207         struct i40e_vsi_context ctxt;
6208         uint8_t vlan_flags;
6209         int ret = I40E_SUCCESS;
6210
6211         /* Check if it is already on or off */
6212         if (vsi->info.valid_sections &
6213                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6214                 if (on) {
6215                         if ((vsi->info.port_vlan_flags &
6216                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6217                                 return 0; /* already on */
6218                 } else {
6219                         if ((vsi->info.port_vlan_flags &
6220                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6221                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
6222                                 return 0; /* already off */
6223                 }
6224         }
6225
6226         if (on)
6227                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6228         else
6229                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6230         vsi->info.valid_sections =
6231                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6232         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6233         vsi->info.port_vlan_flags |= vlan_flags;
6234         ctxt.seid = vsi->seid;
6235         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6236         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6237         if (ret)
6238                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6239                             on ? "enable" : "disable");
6240
6241         return ret;
6242 }
6243
6244 static int
6245 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6246 {
6247         struct rte_eth_dev_data *data = dev->data;
6248         int ret;
6249         int mask = 0;
6250
6251         /* Apply vlan offload setting */
6252         mask = ETH_VLAN_STRIP_MASK |
6253                ETH_QINQ_STRIP_MASK |
6254                ETH_VLAN_FILTER_MASK |
6255                ETH_VLAN_EXTEND_MASK;
6256         ret = i40e_vlan_offload_set(dev, mask);
6257         if (ret) {
6258                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6259                 return ret;
6260         }
6261
6262         /* Apply pvid setting */
6263         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6264                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
6265         if (ret)
6266                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
6267
6268         return ret;
6269 }
6270
6271 static int
6272 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6273 {
6274         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6275
6276         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6277 }
6278
6279 static int
6280 i40e_update_flow_control(struct i40e_hw *hw)
6281 {
6282 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6283         struct i40e_link_status link_status;
6284         uint32_t rxfc = 0, txfc = 0, reg;
6285         uint8_t an_info;
6286         int ret;
6287
6288         memset(&link_status, 0, sizeof(link_status));
6289         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6290         if (ret != I40E_SUCCESS) {
6291                 PMD_DRV_LOG(ERR, "Failed to get link status information");
6292                 goto write_reg; /* Disable flow control */
6293         }
6294
6295         an_info = hw->phy.link_info.an_info;
6296         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6297                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6298                 ret = I40E_ERR_NOT_READY;
6299                 goto write_reg; /* Disable flow control */
6300         }
6301         /**
6302          * If link auto-negotiation is enabled, flow control needs to
6303          * be configured according to the negotiated result.
6304          */
6305         switch (an_info & I40E_LINK_PAUSE_RXTX) {
6306         case I40E_LINK_PAUSE_RXTX:
6307                 rxfc = 1;
6308                 txfc = 1;
6309                 hw->fc.current_mode = I40E_FC_FULL;
6310                 break;
6311         case I40E_AQ_LINK_PAUSE_RX:
6312                 rxfc = 1;
6313                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
6314                 break;
6315         case I40E_AQ_LINK_PAUSE_TX:
6316                 txfc = 1;
6317                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
6318                 break;
6319         default:
6320                 hw->fc.current_mode = I40E_FC_NONE;
6321                 break;
6322         }
6323
6324 write_reg:
6325         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6326                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6327         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6328         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6329         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6330         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6331
6332         return ret;
6333 }
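/*
 * Editorial summary (not part of the driver): the negotiated pause bits map
 * as RX+TX -> I40E_FC_FULL, RX only -> I40E_FC_RX_PAUSE, TX only ->
 * I40E_FC_TX_PAUSE, neither -> I40E_FC_NONE. On any failure the write_reg
 * path still runs with rxfc = txfc = 0, i.e. flow control disabled.
 */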
6334
6335 /* PF setup */
6336 static int
6337 i40e_pf_setup(struct i40e_pf *pf)
6338 {
6339         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6340         struct i40e_filter_control_settings settings;
6341         struct i40e_vsi *vsi;
6342         int ret;
6343
6344         /* Clear all stats counters */
6345         pf->offset_loaded = FALSE;
6346         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6347         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6348         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6349         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6350
6351         ret = i40e_pf_get_switch_config(pf);
6352         if (ret != I40E_SUCCESS) {
6353                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6354                 return ret;
6355         }
6356
6357         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6358         if (ret)
6359                 PMD_INIT_LOG(WARNING,
6360                         "failed to allocate switch domain for device %d", ret);
6361
6362         if (pf->flags & I40E_FLAG_FDIR) {
6363                 /* Do the queue allocation first so FDIR gets queue pair 0 */
6364                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6365                 if (ret != I40E_FDIR_QUEUE_ID) {
6366                         PMD_DRV_LOG(ERR,
6367                                 "queue allocation failed for FDIR: ret = %d",
6368                                 ret);
6369                         pf->flags &= ~I40E_FLAG_FDIR;
6370                 }
6371         }
6372         /* Main VSI setup */
6373         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6374         if (!vsi) {
6375                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6376                 return I40E_ERR_NOT_READY;
6377         }
6378         pf->main_vsi = vsi;
6379
6380         /* Configure filter control */
6381         memset(&settings, 0, sizeof(settings));
6382         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6383                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6384         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6385                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6386         else {
6387                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6388                         hw->func_caps.rss_table_size);
6389                 return I40E_ERR_PARAM;
6390         }
6391         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6392                 hw->func_caps.rss_table_size);
6393         pf->hash_lut_size = hw->func_caps.rss_table_size;
6394
6395         /* Enable ethtype and macvlan filters */
6396         settings.enable_ethtype = TRUE;
6397         settings.enable_macvlan = TRUE;
6398         ret = i40e_set_filter_control(hw, &settings);
6399         if (ret)
6400                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6401                                                                 ret);
6402
6403         /* Update flow control according to the auto negotiation */
6404         i40e_update_flow_control(hw);
6405
6406         return I40E_SUCCESS;
6407 }
6408
6409 int
6410 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6411 {
6412         uint32_t reg;
6413         uint16_t j;
6414
6415         /**
6416          * Set or clear the TX Queue Disable flags,
6417          * as required by hardware.
6418          */
6419         i40e_pre_tx_queue_cfg(hw, q_idx, on);
6420         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6421
6422         /* Wait until the request is finished */
6423         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6424                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6425                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6426                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6427                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6428                                                         & 0x1))) {
6429                         break;
6430                 }
6431         }
6432         if (on) {
6433                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6434                         return I40E_SUCCESS; /* already on, skip next steps */
6435
6436                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6437                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6438         } else {
6439                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6440                         return I40E_SUCCESS; /* already off, skip next steps */
6441                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6442         }
6443         /* Write the register */
6444         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6445         /* Check the result */
6446         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6447                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6448                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6449                 if (on) {
6450                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6451                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
6452                                 break;
6453                 } else {
6454                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6455                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6456                                 break;
6457                 }
6458         }
6459         /* Check whether it timed out */
6460         if (j >= I40E_CHK_Q_ENA_COUNT) {
6461                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6462                             (on ? "enable" : "disable"), q_idx);
6463                 return I40E_ERR_TIMEOUT;
6464         }
6465
6466         return I40E_SUCCESS;
6467 }
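/*
 * Editorial sketch (not part of the driver): the handshake above polls
 * until the enable-request bit matches the status bit, both before and
 * after writing QENA_REQ. A condensed, equivalent form of the poll:
 *
 *   for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
 *       rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
 *       reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
 *       if (!!(reg & I40E_QTX_ENA_QENA_REQ_MASK) ==
 *           !!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
 *           break;  // hardware has acknowledged the last request
 *   }
 */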
6468
6469 int
6470 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6471 {
6472         uint32_t reg;
6473         uint16_t j;
6474
6475         /* Wait until the request is finished */
6476         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6477                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6478                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6479                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6480                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6481                         break;
6482         }
6483
6484         if (on) {
6485                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6486                         return I40E_SUCCESS; /* Already on, skip next steps */
6487                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6488         } else {
6489                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6490                         return I40E_SUCCESS; /* Already off, skip next steps */
6491                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6492         }
6493
6494         /* Write the register */
6495         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6496         /* Check the result */
6497         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6498                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6499                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6500                 if (on) {
6501                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6502                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6503                                 break;
6504                 } else {
6505                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6506                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6507                                 break;
6508                 }
6509         }
6510
6511         /* Check if it is timeout */
6512         if (j >= I40E_CHK_Q_ENA_COUNT) {
6513                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6514                             (on ? "enable" : "disable"), q_idx);
6515                 return I40E_ERR_TIMEOUT;
6516         }
6517
6518         return I40E_SUCCESS;
6519 }
6520
6521 /* Initialize VSI for TX */
6522 static int
6523 i40e_dev_tx_init(struct i40e_pf *pf)
6524 {
6525         struct rte_eth_dev_data *data = pf->dev_data;
6526         uint16_t i;
6527         int ret = I40E_SUCCESS;
6528         struct i40e_tx_queue *txq;
6529
6530         for (i = 0; i < data->nb_tx_queues; i++) {
6531                 txq = data->tx_queues[i];
6532                 if (!txq || !txq->q_set)
6533                         continue;
6534                 ret = i40e_tx_queue_init(txq);
6535                 if (ret != I40E_SUCCESS)
6536                         break;
6537         }
6538         if (ret == I40E_SUCCESS)
6539                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6540                                      ->eth_dev);
6541
6542         return ret;
6543 }
6544
6545 /* Initialize VSI for RX */
6546 static int
6547 i40e_dev_rx_init(struct i40e_pf *pf)
6548 {
6549         struct rte_eth_dev_data *data = pf->dev_data;
6550         int ret = I40E_SUCCESS;
6551         uint16_t i;
6552         struct i40e_rx_queue *rxq;
6553
6554         i40e_pf_config_rss(pf);
6555         for (i = 0; i < data->nb_rx_queues; i++) {
6556                 rxq = data->rx_queues[i];
6557                 if (!rxq || !rxq->q_set)
6558                         continue;
6559
6560                 ret = i40e_rx_queue_init(rxq);
6561                 if (ret != I40E_SUCCESS) {
6562                         PMD_DRV_LOG(ERR,
6563                                 "Failed to do RX queue initialization");
6564                         break;
6565                 }
6566         }
6567         if (ret == I40E_SUCCESS)
6568                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6569                                      ->eth_dev);
6570
6571         return ret;
6572 }
6573
6574 static int
6575 i40e_dev_rxtx_init(struct i40e_pf *pf)
6576 {
6577         int err;
6578
6579         err = i40e_dev_tx_init(pf);
6580         if (err) {
6581                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6582                 return err;
6583         }
6584         err = i40e_dev_rx_init(pf);
6585         if (err) {
6586                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6587                 return err;
6588         }
6589
6590         return err;
6591 }
6592
6593 static int
6594 i40e_vmdq_setup(struct rte_eth_dev *dev)
6595 {
6596         struct rte_eth_conf *conf = &dev->data->dev_conf;
6597         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6598         int i, err, conf_vsis, j, loop;
6599         struct i40e_vsi *vsi;
6600         struct i40e_vmdq_info *vmdq_info;
6601         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6602         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6603
6604         /*
6605          * Disable interrupts to avoid messages from VFs. This also
6606          * avoids races during VSI creation/destruction.
6607          */
6608         i40e_pf_disable_irq0(hw);
6609
6610         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6611                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6612                 return -ENOTSUP;
6613         }
6614
6615         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6616         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6617                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support: %u",
6618                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6619                         pf->max_nb_vmdq_vsi);
6620                 return -ENOTSUP;
6621         }
6622
6623         if (pf->vmdq != NULL) {
6624                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6625                 return 0;
6626         }
6627
6628         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6629                                 sizeof(*vmdq_info) * conf_vsis, 0);
6630
6631         if (pf->vmdq == NULL) {
6632                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6633                 return -ENOMEM;
6634         }
6635
6636         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6637
6638         /* Create VMDQ VSI */
6639         for (i = 0; i < conf_vsis; i++) {
6640                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6641                                 vmdq_conf->enable_loop_back);
6642                 if (vsi == NULL) {
6643                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6644                         err = -1;
6645                         goto err_vsi_setup;
6646                 }
6647                 vmdq_info = &pf->vmdq[i];
6648                 vmdq_info->pf = pf;
6649                 vmdq_info->vsi = vsi;
6650         }
6651         pf->nb_cfg_vmdq_vsi = conf_vsis;
6652
6653         /* Configure Vlan */
6654         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6655         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6656                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6657                         if (vmdq_conf->pool_map[i].pools & (1ULL << j)) {
6658                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6659                                         vmdq_conf->pool_map[i].vlan_id, j);
6660
6661                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6662                                                 vmdq_conf->pool_map[i].vlan_id);
6663                                 if (err) {
6664                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6665                                         err = -1;
6666                                         goto err_vsi_setup;
6667                                 }
6668                         }
6669                 }
6670         }
6671
6672         i40e_pf_enable_irq0(hw);
6673
6674         return 0;
6675
6676 err_vsi_setup:
6677         for (i = 0; i < conf_vsis; i++) {
6678                 if (pf->vmdq[i].vsi == NULL)
6679                         break;
6680                 i40e_vsi_release(pf->vmdq[i].vsi);
6681         }
6682
6683         rte_free(pf->vmdq);
6684         pf->vmdq = NULL;
6685         i40e_pf_enable_irq0(hw);
6686         return err;
6687 }
6688
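/*
 * Worked example for the pool-map loop above (values are hypothetical):
 * with vmdq_conf->pool_map[0] = { .vlan_id = 100, .pools = 0x5 }, bits 0
 * and 2 of the 64-bit pools mask are set (loop == 64), so VLAN 100 is
 * added to VMDQ VSIs 0 and 2 only.
 */
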
6689 static void
6690 i40e_stat_update_32(struct i40e_hw *hw,
6691                    uint32_t reg,
6692                    bool offset_loaded,
6693                    uint64_t *offset,
6694                    uint64_t *stat)
6695 {
6696         uint64_t new_data;
6697
6698         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6699         if (!offset_loaded)
6700                 *offset = new_data;
6701
6702         if (new_data >= *offset)
6703                 *stat = (uint64_t)(new_data - *offset);
6704         else
6705                 *stat = (uint64_t)((new_data +
6706                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6707 }
6708
6709 static void
6710 i40e_stat_update_48(struct i40e_hw *hw,
6711                    uint32_t hireg,
6712                    uint32_t loreg,
6713                    bool offset_loaded,
6714                    uint64_t *offset,
6715                    uint64_t *stat)
6716 {
6717         uint64_t new_data;
6718
6719         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6720         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6721                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6722
6723         if (!offset_loaded)
6724                 *offset = new_data;
6725
6726         if (new_data >= *offset)
6727                 *stat = new_data - *offset;
6728         else
6729                 *stat = (uint64_t)((new_data +
6730                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6731
6732         *stat &= I40E_48_BIT_MASK;
6733 }
6734
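/*
 * Worked example of the 48-bit rollover handling above: if *offset was
 * latched at 0xFFFFFFFFF000 and the counter has since wrapped to
 * new_data = 0x100, a plain subtraction would underflow, so the code
 * computes (new_data + (1ULL << 48)) - *offset = 0x1100, and the final
 * mask keeps the result within 48 bits.
 */
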
6735 /* Disable IRQ0 */
6736 void
6737 i40e_pf_disable_irq0(struct i40e_hw *hw)
6738 {
6739         /* Disable all interrupt types */
6740         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6741                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6742         I40E_WRITE_FLUSH(hw);
6743 }
6744
6745 /* Enable IRQ0 */
6746 void
6747 i40e_pf_enable_irq0(struct i40e_hw *hw)
6748 {
6749         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6750                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6751                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6752                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6753         I40E_WRITE_FLUSH(hw);
6754 }
6755
6756 static void
6757 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6758 {
6759         /* read pending request and disable first */
6760         i40e_pf_disable_irq0(hw);
6761         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6762         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6763                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6764
6765         if (no_queue)
6766                 /* Do not link any queue to irq0 */
6767                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6768                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6769 }
6770
6771 static void
6772 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6773 {
6774         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6775         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6776         int i;
6777         uint16_t abs_vf_id;
6778         uint32_t index, offset, val;
6779
6780         if (!pf->vfs)
6781                 return;
6782         /**
6783          * Try to find which VF triggered a reset. Use the absolute VF id,
6784          * since the register is a global register.
6785          */
6786         for (i = 0; i < pf->vf_num; i++) {
6787                 abs_vf_id = hw->func_caps.vf_base_id + i;
6788                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6789                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6790                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6791                 /* VFR event occurred */
6792                 if (val & (0x1 << offset)) {
6793                         int ret;
6794
6795                         /* Clear the event first */
6796                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6797                                                         (0x1 << offset));
6798                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6799                         /**
6800                          * Only notify that a VF reset event occurred;
6801                          * don't trigger another SW reset.
6802                          */
6803                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6804                         if (ret != I40E_SUCCESS)
6805                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6806                 }
6807         }
6808 }
6809
6810 static void
6811 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6812 {
6813         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6814         int i;
6815
6816         for (i = 0; i < pf->vf_num; i++)
6817                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6818 }
6819
6820 static void
6821 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6822 {
6823         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6824         struct i40e_arq_event_info info;
6825         uint16_t pending, opcode;
6826         int ret;
6827
6828         info.buf_len = I40E_AQ_BUF_SZ;
6829         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6830         if (!info.msg_buf) {
6831                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6832                 return;
6833         }
6834
6835         pending = 1;
6836         while (pending) {
6837                 ret = i40e_clean_arq_element(hw, &info, &pending);
6838
6839                 if (ret != I40E_SUCCESS) {
6840                         PMD_DRV_LOG(INFO,
6841                                 "Failed to read msg from AdminQ, aq_err: %u",
6842                                 hw->aq.asq_last_status);
6843                         break;
6844                 }
6845                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6846
6847                 switch (opcode) {
6848                 case i40e_aqc_opc_send_msg_to_pf:
6849                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6850                         i40e_pf_host_handle_vf_msg(dev,
6851                                         rte_le_to_cpu_16(info.desc.retval),
6852                                         rte_le_to_cpu_32(info.desc.cookie_high),
6853                                         rte_le_to_cpu_32(info.desc.cookie_low),
6854                                         info.msg_buf,
6855                                         info.msg_len);
6856                         break;
6857                 case i40e_aqc_opc_get_link_status:
6858                         ret = i40e_dev_link_update(dev, 0);
6859                         if (!ret)
6860                                 rte_eth_dev_callback_process(dev,
6861                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6862                         break;
6863                 default:
6864                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6865                                     opcode);
6866                         break;
6867                 }
6868         }
6869         rte_free(info.msg_buf);
6870 }
6871
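/*
 * The drain loop above relies on i40e_clean_arq_element() reporting,
 * through its "pending" out-parameter, how many events remain on the
 * receive queue, so iterating until pending reaches 0 empties the
 * AdminQ in a single pass. A minimal sketch of the same pattern
 * (illustrative only):
 *
 *     uint16_t pending = 1;
 *     while (pending)
 *         if (i40e_clean_arq_element(hw, &info, &pending) != I40E_SUCCESS)
 *             break;
 */
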
6872 static void
6873 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6874 {
6875 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6876 #define I40E_MDD_CLEAR16 0xFFFF
6877         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6878         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6879         bool mdd_detected = false;
6880         struct i40e_pf_vf *vf;
6881         uint32_t reg;
6882         int i;
6883
6884         /* find what triggered the MDD event */
6885         reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6886         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6887                 uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6888                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6889                 uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6890                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6891                 uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6892                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6893                 uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6894                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6895                                         hw->func_caps.base_queue;
6896                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6897                         "queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6898                                 event, queue, pf_num, vf_num, dev->data->name);
6899                 I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6900                 mdd_detected = true;
6901         }
6902         reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6903         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6904                 uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6905                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6906                 uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6907                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6908                 uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6909                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6910                                         hw->func_caps.base_queue;
6911
6912                 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6913                                 "queue %d of function 0x%02x device %s\n",
6914                                         event, queue, func, dev->data->name);
6915                 I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6916                 mdd_detected = true;
6917         }
6918
6919         if (mdd_detected) {
6920                 reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6921                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6922                         I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6923                         PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6924                 }
6925                 reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6926                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6927                         I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6928                                         I40E_MDD_CLEAR16);
6929                         PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6930                 }
6931         }
6932
6933         /* see if one of the VFs needs its hand slapped */
6934         for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6935                 vf = &pf->vfs[i];
6936                 reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6937                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6938                         I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6939                                         I40E_MDD_CLEAR16);
6940                         vf->num_mdd_events++;
6941                         PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-"
6942                                         PRIu64 "times\n",
6943                                         i, vf->num_mdd_events);
6944                 }
6945
6946                 reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6947                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6948                         I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6949                                         I40E_MDD_CLEAR16);
6950                         vf->num_mdd_events++;
6951                         PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-"
6952                                         PRIu64 "times\n",
6953                                         i, vf->num_mdd_events);
6954                 }
6955         }
6956 }
6957
6958 /**
6959  * Interrupt handler triggered by the NIC for handling
6960  * specific interrupts.
6961  *
6962  * @param param
6963  *  The address of the parameter (struct rte_eth_dev *) registered
6964  *  when the interrupt callback was installed, i.e. the port being
6965  *  serviced.
6966  *
6967  * @return
6968  *  void
6969  */
6970 static void
6971 i40e_dev_interrupt_handler(void *param)
6972 {
6973         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6974         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6975         uint32_t icr0;
6976
6977         /* Disable interrupt */
6978         i40e_pf_disable_irq0(hw);
6979
6980         /* read out interrupt causes */
6981         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6982
6983         /* No interrupt event indicated */
6984         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6985                 PMD_DRV_LOG(INFO, "No interrupt event");
6986                 goto done;
6987         }
6988         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6989                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6990         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6991                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6992                 i40e_handle_mdd_event(dev);
6993         }
6994         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6995                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6996         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6997                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6998         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6999                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7000         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7001                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
7002         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7003                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7004
7005         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7006                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7007                 i40e_dev_handle_vfr_event(dev);
7008         }
7009         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7010                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
7011                 i40e_dev_handle_aq_msg(dev);
7012         }
7013
7014 done:
7015         /* Enable interrupt */
7016         i40e_pf_enable_irq0(hw);
7017 }
7018
7019 static void
7020 i40e_dev_alarm_handler(void *param)
7021 {
7022         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
7023         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7024         uint32_t icr0;
7025
7026         /* Disable interrupt */
7027         i40e_pf_disable_irq0(hw);
7028
7029         /* read out interrupt causes */
7030         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
7031
7032         /* No interrupt event indicated */
7033         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
7034                 goto done;
7035         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
7036                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
7037         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
7038                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
7039                 i40e_handle_mdd_event(dev);
7040         }
7041         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
7042                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
7043         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
7044                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
7045         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
7046                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
7047         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
7048                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
7049         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
7050                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
7051
7052         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
7053                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
7054                 i40e_dev_handle_vfr_event(dev);
7055         }
7056         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
7057                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
7058                 i40e_dev_handle_aq_msg(dev);
7059         }
7060
7061 done:
7062         /* Enable interrupt */
7063         i40e_pf_enable_irq0(hw);
7064         rte_eal_alarm_set(I40E_ALARM_INTERVAL,
7065                           i40e_dev_alarm_handler, dev);
7066 }
7067
7068 int
7069 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
7070                          struct i40e_macvlan_filter *filter,
7071                          int total)
7072 {
7073         int ele_num, ele_buff_size;
7074         int num, actual_num, i;
7075         uint16_t flags;
7076         int ret = I40E_SUCCESS;
7077         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7078         struct i40e_aqc_add_macvlan_element_data *req_list;
7079
7080         if (filter == NULL || total == 0)
7081                 return I40E_ERR_PARAM;
7082         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7083         ele_buff_size = hw->aq.asq_buf_size;
7084
7085         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
7086         if (req_list == NULL) {
7087                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
7088                 return I40E_ERR_NO_MEMORY;
7089         }
7090
7091         num = 0;
7092         do {
7093                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7094                 memset(req_list, 0, ele_buff_size);
7095
7096                 for (i = 0; i < actual_num; i++) {
7097                         rte_memcpy(req_list[i].mac_addr,
7098                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7099                         req_list[i].vlan_tag =
7100                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7101
7102                         switch (filter[num + i].filter_type) {
7103                         case RTE_MAC_PERFECT_MATCH:
7104                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
7105                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7106                                 break;
7107                         case RTE_MACVLAN_PERFECT_MATCH:
7108                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7109                                 break;
7110                         case RTE_MAC_HASH_MATCH:
7111                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
7112                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
7113                                 break;
7114                         case RTE_MACVLAN_HASH_MATCH:
7115                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
7116                                 break;
7117                         default:
7118                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
7119                                 ret = I40E_ERR_PARAM;
7120                                 goto DONE;
7121                         }
7122
7123                         req_list[i].queue_number = 0;
7124
7125                         req_list[i].flags = rte_cpu_to_le_16(flags);
7126                 }
7127
7128                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
7129                                                 actual_num, NULL);
7130                 if (ret != I40E_SUCCESS) {
7131                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
7132                         goto DONE;
7133                 }
7134                 num += actual_num;
7135         } while (num < total);
7136
7137 DONE:
7138         rte_free(req_list);
7139         return ret;
7140 }
7141
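/*
 * Worked example of the chunking above (buffer size is hypothetical):
 * if the AdminQ buffer holds ele_num = 128 elements and total = 300
 * filters are requested, the do/while loop issues three
 * i40e_aq_add_macvlan() calls with actual_num = 128, 128 and 44.
 */
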
7142 int
7143 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
7144                             struct i40e_macvlan_filter *filter,
7145                             int total)
7146 {
7147         int ele_num, ele_buff_size;
7148         int num, actual_num, i;
7149         uint16_t flags;
7150         int ret = I40E_SUCCESS;
7151         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7152         struct i40e_aqc_remove_macvlan_element_data *req_list;
7153
7154         if (filter == NULL || total == 0)
7155                 return I40E_ERR_PARAM;
7156
7157         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7158         ele_buff_size = hw->aq.asq_buf_size;
7159
7160         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7161         if (req_list == NULL) {
7162                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
7163                 return I40E_ERR_NO_MEMORY;
7164         }
7165
7166         num = 0;
7167         do {
7168                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7169                 memset(req_list, 0, ele_buff_size);
7170
7171                 for (i = 0; i < actual_num; i++) {
7172                         rte_memcpy(req_list[i].mac_addr,
7173                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
7174                         req_list[i].vlan_tag =
7175                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
7176
7177                         switch (filter[num + i].filter_type) {
7178                         case RTE_MAC_PERFECT_MATCH:
7179                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7180                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7181                                 break;
7182                         case RTE_MACVLAN_PERFECT_MATCH:
7183                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7184                                 break;
7185                         case RTE_MAC_HASH_MATCH:
7186                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7187                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7188                                 break;
7189                         case RTE_MACVLAN_HASH_MATCH:
7190                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7191                                 break;
7192                         default:
7193                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7194                                 ret = I40E_ERR_PARAM;
7195                                 goto DONE;
7196                         }
7197                         req_list[i].flags = rte_cpu_to_le_16(flags);
7198                 }
7199
7200                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7201                                                 actual_num, NULL);
7202                 if (ret != I40E_SUCCESS) {
7203                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7204                         goto DONE;
7205                 }
7206                 num += actual_num;
7207         } while (num < total);
7208
7209 DONE:
7210         rte_free(req_list);
7211         return ret;
7212 }
7213
7214 /* Find out specific MAC filter */
7215 static struct i40e_mac_filter *
7216 i40e_find_mac_filter(struct i40e_vsi *vsi,
7217                          struct rte_ether_addr *macaddr)
7218 {
7219         struct i40e_mac_filter *f;
7220
7221         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7222                 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7223                         return f;
7224         }
7225
7226         return NULL;
7227 }
7228
7229 static bool
7230 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7231                          uint16_t vlan_id)
7232 {
7233         uint32_t vid_idx, vid_bit;
7234
7235         if (vlan_id > ETH_VLAN_ID_MAX)
7236                 return 0;
7237
7238         vid_idx = I40E_VFTA_IDX(vlan_id);
7239         vid_bit = I40E_VFTA_BIT(vlan_id);
7240
7241         if (vsi->vfta[vid_idx] & vid_bit)
7242                 return 1;
7243         else
7244                 return 0;
7245 }
7246
7247 static void
7248 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7249                        uint16_t vlan_id, bool on)
7250 {
7251         uint32_t vid_idx, vid_bit;
7252
7253         vid_idx = I40E_VFTA_IDX(vlan_id);
7254         vid_bit = I40E_VFTA_BIT(vlan_id);
7255
7256         if (on)
7257                 vsi->vfta[vid_idx] |= vid_bit;
7258         else
7259                 vsi->vfta[vid_idx] &= ~vid_bit;
7260 }
7261
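/*
 * Example of the VFTA indexing above, assuming the usual definitions
 * I40E_VFTA_IDX(v) = ((v) >> 5) and I40E_VFTA_BIT(v) = (1 << ((v) & 0x1F)):
 * vlan_id 100 maps to vfta[3], bit 4 (100 == 3 * 32 + 4), so each
 * uint32_t word of vfta[] tracks 32 consecutive VLAN IDs.
 */
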
7262 void
7263 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7264                      uint16_t vlan_id, bool on)
7265 {
7266         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7267         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7268         int ret;
7269
7270         if (vlan_id > ETH_VLAN_ID_MAX)
7271                 return;
7272
7273         i40e_store_vlan_filter(vsi, vlan_id, on);
7274
7275         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7276                 return;
7277
7278         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7279
7280         if (on) {
7281                 ret = i40e_aq_add_vlan(hw, vsi->seid,
7282                                        &vlan_data, 1, NULL);
7283                 if (ret != I40E_SUCCESS)
7284                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7285         } else {
7286                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
7287                                           &vlan_data, 1, NULL);
7288                 if (ret != I40E_SUCCESS)
7289                         PMD_DRV_LOG(ERR,
7290                                     "Failed to remove vlan filter");
7291         }
7292 }
7293
7294 /**
7295  * Find all vlans configured for a specific mac addr;
7296  * fill mv_f with every (mac, vlan) pair found.
7297  */
7298 int
7299 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7300                            struct i40e_macvlan_filter *mv_f,
7301                            int num, struct rte_ether_addr *addr)
7302 {
7303         int i;
7304         uint32_t j, k;
7305
7306         /**
7307          * Avoid calling i40e_find_vlan_filter() here to reduce loop time,
7308          * even though it makes the code look more complex.
7309          */
7310         if (num < vsi->vlan_num)
7311                 return I40E_ERR_PARAM;
7312
7313         i = 0;
7314         for (j = 0; j < I40E_VFTA_SIZE; j++) {
7315                 if (vsi->vfta[j]) {
7316                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7317                                 if (vsi->vfta[j] & (1U << k)) {
7318                                         if (i > num - 1) {
7319                                                 PMD_DRV_LOG(ERR,
7320                                                         "vlan number doesn't match");
7321                                                 return I40E_ERR_PARAM;
7322                                         }
7323                                         rte_memcpy(&mv_f[i].macaddr,
7324                                                         addr, ETH_ADDR_LEN);
7325                                         mv_f[i].vlan_id =
7326                                                 j * I40E_UINT32_BIT_SIZE + k;
7327                                         i++;
7328                                 }
7329                         }
7330                 }
7331         }
7332         return I40E_SUCCESS;
7333 }
7334
7335 static inline int
7336 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7337                            struct i40e_macvlan_filter *mv_f,
7338                            int num,
7339                            uint16_t vlan)
7340 {
7341         int i = 0;
7342         struct i40e_mac_filter *f;
7343
7344         if (num < vsi->mac_num)
7345                 return I40E_ERR_PARAM;
7346
7347         TAILQ_FOREACH(f, &vsi->mac_list, next) {
7348                 if (i > num - 1) {
7349                         PMD_DRV_LOG(ERR, "buffer number not match");
7350                         return I40E_ERR_PARAM;
7351                 }
7352                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7353                                 ETH_ADDR_LEN);
7354                 mv_f[i].vlan_id = vlan;
7355                 mv_f[i].filter_type = f->mac_info.filter_type;
7356                 i++;
7357         }
7358
7359         return I40E_SUCCESS;
7360 }
7361
7362 static int
7363 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7364 {
7365         int i, j, num;
7366         struct i40e_mac_filter *f;
7367         struct i40e_macvlan_filter *mv_f;
7368         int ret = I40E_SUCCESS;
7369
7370         if (vsi == NULL || vsi->mac_num == 0)
7371                 return I40E_ERR_PARAM;
7372
7373         /* Case that no vlan is set */
7374         if (vsi->vlan_num == 0)
7375                 num = vsi->mac_num;
7376         else
7377                 num = vsi->mac_num * vsi->vlan_num;
7378
7379         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7380         if (mv_f == NULL) {
7381                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7382                 return I40E_ERR_NO_MEMORY;
7383         }
7384
7385         i = 0;
7386         if (vsi->vlan_num == 0) {
7387                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7388                         rte_memcpy(&mv_f[i].macaddr,
7389                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
7390                         mv_f[i].filter_type = f->mac_info.filter_type;
7391                         mv_f[i].vlan_id = 0;
7392                         i++;
7393                 }
7394         } else {
7395                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
7396                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7397                                         vsi->vlan_num, &f->mac_info.mac_addr);
7398                         if (ret != I40E_SUCCESS)
7399                                 goto DONE;
7400                         for (j = i; j < i + vsi->vlan_num; j++)
7401                                 mv_f[j].filter_type = f->mac_info.filter_type;
7402                         i += vsi->vlan_num;
7403                 }
7404         }
7405
7406         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7407 DONE:
7408         rte_free(mv_f);
7409
7410         return ret;
7411 }
7412
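/*
 * Sizing example for the mv_f array above: a VSI with mac_num = 2 and
 * vlan_num = 3 needs num = 6 entries, one per (mac, vlan) pair, because
 * i40e_find_all_vlan_for_mac() expands every MAC across all VLANs.
 */
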
7413 int
7414 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7415 {
7416         struct i40e_macvlan_filter *mv_f;
7417         int mac_num;
7418         int ret = I40E_SUCCESS;
7419
7420         if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7421                 return I40E_ERR_PARAM;
7422
7423         /* If it's already set, just return */
7424         if (i40e_find_vlan_filter(vsi, vlan))
7425                 return I40E_SUCCESS;
7426
7427         mac_num = vsi->mac_num;
7428
7429         if (mac_num == 0) {
7430                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7431                 return I40E_ERR_PARAM;
7432         }
7433
7434         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7435
7436         if (mv_f == NULL) {
7437                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7438                 return I40E_ERR_NO_MEMORY;
7439         }
7440
7441         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7442
7443         if (ret != I40E_SUCCESS)
7444                 goto DONE;
7445
7446         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7447
7448         if (ret != I40E_SUCCESS)
7449                 goto DONE;
7450
7451         i40e_set_vlan_filter(vsi, vlan, 1);
7452
7453         vsi->vlan_num++;
7454         ret = I40E_SUCCESS;
7455 DONE:
7456         rte_free(mv_f);
7457         return ret;
7458 }
7459
7460 int
7461 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7462 {
7463         struct i40e_macvlan_filter *mv_f;
7464         int mac_num;
7465         int ret = I40E_SUCCESS;
7466
7467         /**
7468          * Vlan 0 is the generic filter for untagged packets
7469          * and can't be removed.
7470          */
7471         if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7472                 return I40E_ERR_PARAM;
7473
7474         /* If it can't be found, just return */
7475         if (!i40e_find_vlan_filter(vsi, vlan))
7476                 return I40E_ERR_PARAM;
7477
7478         mac_num = vsi->mac_num;
7479
7480         if (mac_num == 0) {
7481                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7482                 return I40E_ERR_PARAM;
7483         }
7484
7485         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7486
7487         if (mv_f == NULL) {
7488                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7489                 return I40E_ERR_NO_MEMORY;
7490         }
7491
7492         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7493
7494         if (ret != I40E_SUCCESS)
7495                 goto DONE;
7496
7497         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7498
7499         if (ret != I40E_SUCCESS)
7500                 goto DONE;
7501
7502         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
7503         if (vsi->vlan_num == 1) {
7504                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7505                 if (ret != I40E_SUCCESS)
7506                         goto DONE;
7507
7508                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7509                 if (ret != I40E_SUCCESS)
7510                         goto DONE;
7511         }
7512
7513         i40e_set_vlan_filter(vsi, vlan, 0);
7514
7515         vsi->vlan_num--;
7516         ret = I40E_SUCCESS;
7517 DONE:
7518         rte_free(mv_f);
7519         return ret;
7520 }
7521
7522 int
7523 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7524 {
7525         struct i40e_mac_filter *f;
7526         struct i40e_macvlan_filter *mv_f;
7527         int i, vlan_num = 0;
7528         int ret = I40E_SUCCESS;
7529
7530         /* If the filter has already been configured, just return */
7531         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7532         if (f != NULL)
7533                 return I40E_SUCCESS;
7534         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
7535                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
7536
7537                 /**
7538                  * If vlan_num is 0, this is the first mac being added;
7539                  * set the mask for vlan_id 0.
7540                  */
7541                 if (vsi->vlan_num == 0) {
7542                         i40e_set_vlan_filter(vsi, 0, 1);
7543                         vsi->vlan_num = 1;
7544                 }
7545                 vlan_num = vsi->vlan_num;
7546         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
7547                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
7548                 vlan_num = 1;
7549
7550         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7551         if (mv_f == NULL) {
7552                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7553                 return I40E_ERR_NO_MEMORY;
7554         }
7555
7556         for (i = 0; i < vlan_num; i++) {
7557                 mv_f[i].filter_type = mac_filter->filter_type;
7558                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7559                                 ETH_ADDR_LEN);
7560         }
7561
7562         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7563                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
7564                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7565                                         &mac_filter->mac_addr);
7566                 if (ret != I40E_SUCCESS)
7567                         goto DONE;
7568         }
7569
7570         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7571         if (ret != I40E_SUCCESS)
7572                 goto DONE;
7573
7574         /* Add the mac addr into mac list */
7575         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7576         if (f == NULL) {
7577                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7578                 ret = I40E_ERR_NO_MEMORY;
7579                 goto DONE;
7580         }
7581         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7582                         ETH_ADDR_LEN);
7583         f->mac_info.filter_type = mac_filter->filter_type;
7584         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7585         vsi->mac_num++;
7586
7587         ret = I40E_SUCCESS;
7588 DONE:
7589         rte_free(mv_f);
7590
7591         return ret;
7592 }
7593
7594 int
7595 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7596 {
7597         struct i40e_mac_filter *f;
7598         struct i40e_macvlan_filter *mv_f;
7599         int i, vlan_num;
7600         enum rte_mac_filter_type filter_type;
7601         int ret = I40E_SUCCESS;
7602
7603         /* Can't find it, return an error */
7604         f = i40e_find_mac_filter(vsi, addr);
7605         if (f == NULL)
7606                 return I40E_ERR_PARAM;
7607
7608         vlan_num = vsi->vlan_num;
7609         filter_type = f->mac_info.filter_type;
7610         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7611                 filter_type == RTE_MACVLAN_HASH_MATCH) {
7612                 if (vlan_num == 0) {
7613                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7614                         return I40E_ERR_PARAM;
7615                 }
7616         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7617                         filter_type == RTE_MAC_HASH_MATCH)
7618                 vlan_num = 1;
7619
7620         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7621         if (mv_f == NULL) {
7622                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7623                 return I40E_ERR_NO_MEMORY;
7624         }
7625
7626         for (i = 0; i < vlan_num; i++) {
7627                 mv_f[i].filter_type = filter_type;
7628                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7629                                 ETH_ADDR_LEN);
7630         }
7631         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7632                         filter_type == RTE_MACVLAN_HASH_MATCH) {
7633                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7634                 if (ret != I40E_SUCCESS)
7635                         goto DONE;
7636         }
7637
7638         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7639         if (ret != I40E_SUCCESS)
7640                 goto DONE;
7641
7642         /* Remove the mac addr from the mac list */
7643         TAILQ_REMOVE(&vsi->mac_list, f, next);
7644         rte_free(f);
7645         vsi->mac_num--;
7646
7647         ret = I40E_SUCCESS;
7648 DONE:
7649         rte_free(mv_f);
7650         return ret;
7651 }
7652
7653 /* Configure hash enable flags for RSS */
7654 uint64_t
7655 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7656 {
7657         uint64_t hena = 0;
7658         int i;
7659
7660         if (!flags)
7661                 return hena;
7662
7663         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7664                 if (flags & (1ULL << i))
7665                         hena |= adapter->pctypes_tbl[i];
7666         }
7667
7668         return hena;
7669 }
7670
7671 /* Parse the hash enable flags */
7672 uint64_t
7673 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7674 {
7675         uint64_t rss_hf = 0;
7676         int i;
7677
7678         if (!flags)
7679                 return rss_hf;
7680
7681         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7682                 if (flags & adapter->pctypes_tbl[i])
7683                         rss_hf |= (1ULL << i);
7684         }
7685         return rss_hf;
7686 }
7687
7688 /* Disable RSS */
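/*
 * Example of the flow-type/PCTYPE translation above: when
 * pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] carries the bit of the
 * IPv4/TCP hardware PCTYPE, i40e_config_hena() sets that bit in HENA
 * whenever bit RTE_ETH_FLOW_NONFRAG_IPV4_TCP is set in the requested
 * flags, and i40e_parse_hena() performs the reverse lookup when
 * reporting the active rss_hf.
 */
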
7689 static void
7690 i40e_pf_disable_rss(struct i40e_pf *pf)
7691 {
7692         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7693
7694         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7695         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7696         I40E_WRITE_FLUSH(hw);
7697 }
7698
7699 int
7700 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7701 {
7702         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7703         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7704         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7705                            I40E_VFQF_HKEY_MAX_INDEX :
7706                            I40E_PFQF_HKEY_MAX_INDEX;
7707         int ret = 0;
7708
7709         if (!key || key_len == 0) {
7710                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7711                 return 0;
7712         } else if (key_len != (key_idx + 1) *
7713                 sizeof(uint32_t)) {
7714                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7715                 return -EINVAL;
7716         }
7717
7718         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7719                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7720                         (struct i40e_aqc_get_set_rss_key_data *)key;
7721
7722                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7723                 if (ret)
7724                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7725         } else {
7726                 uint32_t *hash_key = (uint32_t *)key;
7727                 uint16_t i;
7728
7729                 if (vsi->type == I40E_VSI_SRIOV) {
7730                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7731                                 I40E_WRITE_REG(
7732                                         hw,
7733                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7734                                         hash_key[i]);
7735
7736                 } else {
7737                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7738                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7739                                                hash_key[i]);
7740                 }
7741                 I40E_WRITE_FLUSH(hw);
7742         }
7743
7744         return ret;
7745 }
7746
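/*
 * Example of the key-length check above: with I40E_PFQF_HKEY_MAX_INDEX
 * equal to 12, a PF VSI expects key_len = (12 + 1) * sizeof(uint32_t)
 * = 52 bytes; any other non-zero length is rejected with -EINVAL before
 * the hardware is touched.
 */
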
7747 static int
7748 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7749 {
7750         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7751         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7752         uint32_t reg;
7753         int ret;
7754
7755         if (!key || !key_len)
7756                 return 0;
7757
7758         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7759                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7760                         (struct i40e_aqc_get_set_rss_key_data *)key);
7761                 if (ret) {
7762                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7763                         return ret;
7764                 }
7765         } else {
7766                 uint32_t *key_dw = (uint32_t *)key;
7767                 uint16_t i;
7768
7769                 if (vsi->type == I40E_VSI_SRIOV) {
7770                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7771                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7772                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7773                         }
7774                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7775                                    sizeof(uint32_t);
7776                 } else {
7777                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7778                                 reg = I40E_PFQF_HKEY(i);
7779                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7780                         }
7781                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7782                                    sizeof(uint32_t);
7783                 }
7784         }
7785         return 0;
7786 }
7787
7788 static int
7789 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7790 {
7791         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7792         uint64_t hena;
7793         int ret;
7794
7795         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7796                                rss_conf->rss_key_len);
7797         if (ret)
7798                 return ret;
7799
7800         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7801         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7802         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7803         I40E_WRITE_FLUSH(hw);
7804
7805         return 0;
7806 }
7807
7808 static int
7809 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7810                          struct rte_eth_rss_conf *rss_conf)
7811 {
7812         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7813         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7814         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7815         uint64_t hena;
7816
7817         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7818         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7819
7820         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7821                 if (rss_hf != 0) /* Enable RSS */
7822                         return -EINVAL;
7823                 return 0; /* Nothing to do */
7824         }
7825         /* RSS enabled */
7826         if (rss_hf == 0) /* Disable RSS */
7827                 return -EINVAL;
7828
7829         return i40e_hw_rss_hash_set(pf, rss_conf);
7830 }
7831
7832 static int
7833 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7834                            struct rte_eth_rss_conf *rss_conf)
7835 {
7836         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7837         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7838         uint64_t hena;
7839         int ret;
7840
7841         if (!rss_conf)
7842                 return -EINVAL;
7843
7844         ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7845                          &rss_conf->rss_key_len);
7846         if (ret)
7847                 return ret;
7848
7849         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7850         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7851         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7852
7853         return 0;
7854 }
7855
7856 static int
7857 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7858 {
7859         switch (filter_type) {
7860         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7861                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7862                 break;
7863         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7864                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7865                 break;
7866         case RTE_TUNNEL_FILTER_IMAC_TENID:
7867                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7868                 break;
7869         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7870                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7871                 break;
7872         case ETH_TUNNEL_FILTER_IMAC:
7873                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7874                 break;
7875         case ETH_TUNNEL_FILTER_OIP:
7876                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7877                 break;
7878         case ETH_TUNNEL_FILTER_IIP:
7879                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7880                 break;
7881         default:
7882                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7883                 return -EINVAL;
7884         }
7885
7886         return 0;
7887 }
7888
7889 /* Convert tunnel filter structure */
7890 static int
7891 i40e_tunnel_filter_convert(
7892         struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7893         struct i40e_tunnel_filter *tunnel_filter)
7894 {
7895         rte_ether_addr_copy((struct rte_ether_addr *)
7896                         &cld_filter->element.outer_mac,
7897                 (struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7898         rte_ether_addr_copy((struct rte_ether_addr *)
7899                         &cld_filter->element.inner_mac,
7900                 (struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7901         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7902         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7903              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7904             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7905                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7906         else
7907                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7908         tunnel_filter->input.flags = cld_filter->element.flags;
7909         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7910         tunnel_filter->queue = cld_filter->element.queue_number;
7911         rte_memcpy(tunnel_filter->input.general_fields,
7912                    cld_filter->general_fields,
7913                    sizeof(cld_filter->general_fields));
7914
7915         return 0;
7916 }
7917
7918 /* Check if the tunnel filter already exists in the SW list */
7919 struct i40e_tunnel_filter *
7920 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7921                              const struct i40e_tunnel_filter_input *input)
7922 {
7923         int ret;
7924
7925         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7926         if (ret < 0)
7927                 return NULL;
7928
7929         return tunnel_rule->hash_map[ret];
7930 }
7931
7932 /* Add a tunnel filter into the SW list */
7933 static int
7934 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7935                              struct i40e_tunnel_filter *tunnel_filter)
7936 {
7937         struct i40e_tunnel_rule *rule = &pf->tunnel;
7938         int ret;
7939
7940         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7941         if (ret < 0) {
7942                 PMD_DRV_LOG(ERR,
7943                             "Failed to insert tunnel filter into hash table, error %d!",
7944                             ret);
7945                 return ret;
7946         }
7947         rule->hash_map[ret] = tunnel_filter;
7948
7949         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7950
7951         return 0;
7952 }
7953
7954 /* Delete a tunnel filter from the SW list */
7955 int
7956 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7957                           struct i40e_tunnel_filter_input *input)
7958 {
7959         struct i40e_tunnel_rule *rule = &pf->tunnel;
7960         struct i40e_tunnel_filter *tunnel_filter;
7961         int ret;
7962
7963         ret = rte_hash_del_key(rule->hash_table, input);
7964         if (ret < 0) {
7965                 PMD_DRV_LOG(ERR,
7966                             "Failed to delete tunnel filter from hash table, error %d!",
7967                             ret);
7968                 return ret;
7969         }
7970         tunnel_filter = rule->hash_map[ret];
7971         rule->hash_map[ret] = NULL;
7972
7973         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7974         rte_free(tunnel_filter);
7975
7976         return 0;
7977 }
7978
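     /* Add (add != 0) or remove a tunnel filter in hardware via the admin
      * queue and mirror the change in the SW tunnel filter list.
      */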
7979 int
7980 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7981                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7982                         uint8_t add)
7983 {
7984         uint16_t ip_type;
7985         uint32_t ipv4_addr, ipv4_addr_le;
7986         uint8_t i, tun_type = 0;
7987         /* internal variable to convert IPv6 byte order */
7988         uint32_t convert_ipv6[4];
7989         int val, ret = 0;
7990         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7991         struct i40e_vsi *vsi = pf->main_vsi;
7992         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
7993         struct i40e_aqc_cloud_filters_element_bb *pfilter;
7994         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7995         struct i40e_tunnel_filter *tunnel, *node;
7996         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7997
7998         cld_filter = rte_zmalloc("tunnel_filter",
7999                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8000                          0);
8001
8002         if (cld_filter == NULL) {
8003                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8004                 return -ENOMEM;
8005         }
8006         pfilter = cld_filter;
8007
8008         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8009                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8010         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8011                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8012
8013         pfilter->element.inner_vlan =
8014                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8015         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
8016                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8017                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8018                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8019                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8020                                 &ipv4_addr_le,
8021                                 sizeof(pfilter->element.ipaddr.v4.data));
8022         } else {
8023                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8024                 for (i = 0; i < 4; i++) {
8025                         convert_ipv6[i] =
8026                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
8027                 }
8028                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8029                            &convert_ipv6,
8030                            sizeof(pfilter->element.ipaddr.v6.data));
8031         }
8032
8033         /* check tunneled type */
8034         switch (tunnel_filter->tunnel_type) {
8035         case RTE_TUNNEL_TYPE_VXLAN:
8036                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8037                 break;
8038         case RTE_TUNNEL_TYPE_NVGRE:
8039                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8040                 break;
8041         case RTE_TUNNEL_TYPE_IP_IN_GRE:
8042                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8043                 break;
8044         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8045                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
8046                 break;
8047         default:
8048                 /* Other tunnel types are not supported. */
8049                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
8050                 rte_free(cld_filter);
8051                 return -EINVAL;
8052         }
8053
8054         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8055                                        &pfilter->element.flags);
8056         if (val < 0) {
8057                 rte_free(cld_filter);
8058                 return -EINVAL;
8059         }
8060
8061         pfilter->element.flags |= rte_cpu_to_le_16(
8062                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8063                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8064         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8065         pfilter->element.queue_number =
8066                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8067
8068         /* Check if the filter already exists in the SW list */
8069         memset(&check_filter, 0, sizeof(check_filter));
8070         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8071         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8072         if (add && node) {
8073                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8074                 rte_free(cld_filter);
8075                 return -EINVAL;
8076         }
8077
8078         if (!add && !node) {
8079                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8080                 rte_free(cld_filter);
8081                 return -EINVAL;
8082         }
8083
8084         if (add) {
8085                 ret = i40e_aq_add_cloud_filters(hw,
8086                                         vsi->seid, &cld_filter->element, 1);
8087                 if (ret < 0) {
8088                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8089                         rte_free(cld_filter);
8090                         return -ENOTSUP;
8091                 }
8092                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8093                 if (tunnel == NULL) {
8094                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8095                         rte_free(cld_filter);
8096                         return -ENOMEM;
8097                 }
8098
8099                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8100                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8101                 if (ret < 0)
8102                         rte_free(tunnel);
8103         } else {
8104                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8105                                                    &cld_filter->element, 1);
8106                 if (ret < 0) {
8107                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8108                         rte_free(cld_filter);
8109                         return -ENOTSUP;
8110                 }
8111                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8112         }
8113
8114         rte_free(cld_filter);
8115         return ret;
8116 }
8117
8118 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
8119 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
8120 #define I40E_TR_GENEVE_KEY_MASK                 0x8
8121 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
8122 #define I40E_TR_GRE_KEY_MASK                    0x400
8123 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
8124 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
8125 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
8126 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
8127 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
8128 #define I40E_DIRECTION_INGRESS_KEY              0x8000
8129 #define I40E_TR_L4_TYPE_TCP                     0x2
8130 #define I40E_TR_L4_TYPE_UDP                     0x4
8131 #define I40E_TR_L4_TYPE_SCTP                    0x8
8132
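     /* Replace the default L1 filter with a customized one for MPLS via
      * the admin queue "replace cloud filters" command. Refused when the
      * device is shared with other drivers (support_multi_driver).
      */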
8133 static enum i40e_status_code
8134 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
8135 {
8136         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8137         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8138         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8139         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8140         enum i40e_status_code status = I40E_SUCCESS;
8141
8142         if (pf->support_multi_driver) {
8143                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8144                 return I40E_NOT_SUPPORTED;
8145         }
8146
8147         memset(&filter_replace, 0,
8148                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8149         memset(&filter_replace_buf, 0,
8150                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8151
8152         /* create L1 filter */
8153         filter_replace.old_filter_type =
8154                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8155         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8156         filter_replace.tr_bit = 0;
8157
8158         /* Prepare the buffer, 3 entries */
8159         filter_replace_buf.data[0] =
8160                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8161         filter_replace_buf.data[0] |=
8162                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8163         filter_replace_buf.data[2] = 0xFF;
8164         filter_replace_buf.data[3] = 0xFF;
8165         filter_replace_buf.data[4] =
8166                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8167         filter_replace_buf.data[4] |=
8168                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8169         filter_replace_buf.data[7] = 0xF0;
8170         filter_replace_buf.data[8] =
8171                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
8172         filter_replace_buf.data[8] |=
8173                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8174         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
8175                 I40E_TR_GENEVE_KEY_MASK |
8176                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
8177         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
8178                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
8179                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
8180
8181         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8182                                                &filter_replace_buf);
8183         if (!status && (filter_replace.old_filter_type !=
8184                         filter_replace.new_filter_type))
8185                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8186                             " original: 0x%x, new: 0x%x",
8187                             dev->device->name,
8188                             filter_replace.old_filter_type,
8189                             filter_replace.new_filter_type);
8190
8191         return status;
8192 }
8193
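     /* Replace the default cloud filters with customized ones for
      * MPLSoUDP and MPLSoGRE.
      */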
8194 static enum i40e_status_code
8195 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
8196 {
8197         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8198         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8199         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8200         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8201         enum i40e_status_code status = I40E_SUCCESS;
8202
8203         if (pf->support_multi_driver) {
8204                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8205                 return I40E_NOT_SUPPORTED;
8206         }
8207
8208         /* For MPLSoUDP */
8209         memset(&filter_replace, 0,
8210                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8211         memset(&filter_replace_buf, 0,
8212                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8213         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8214                 I40E_AQC_MIRROR_CLOUD_FILTER;
8215         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8216         filter_replace.new_filter_type =
8217                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8218         /* Prepare the buffer, 2 entries */
8219         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8220         filter_replace_buf.data[0] |=
8221                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8222         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8223         filter_replace_buf.data[4] |=
8224                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8225         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8226                                                &filter_replace_buf);
8227         if (status < 0)
8228                 return status;
8229         if (filter_replace.old_filter_type !=
8230             filter_replace.new_filter_type)
8231                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8232                             " original: 0x%x, new: 0x%x",
8233                             dev->device->name,
8234                             filter_replace.old_filter_type,
8235                             filter_replace.new_filter_type);
8236
8237         /* For MPLSoGRE */
8238         memset(&filter_replace, 0,
8239                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8240         memset(&filter_replace_buf, 0,
8241                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8242
8243         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8244                 I40E_AQC_MIRROR_CLOUD_FILTER;
8245         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
8246         filter_replace.new_filter_type =
8247                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8248         /* Prepare the buffer, 2 entries */
8249         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8250         filter_replace_buf.data[0] |=
8251                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8252         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8253         filter_replace_buf.data[4] |=
8254                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8255
8256         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8257                                                &filter_replace_buf);
8258         if (!status && (filter_replace.old_filter_type !=
8259                         filter_replace.new_filter_type))
8260                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8261                             " original: 0x%x, new: 0x%x",
8262                             dev->device->name,
8263                             filter_replace.old_filter_type,
8264                             filter_replace.new_filter_type);
8265
8266         return status;
8267 }
8268
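     /* Replace the default L1 filters with customized ones matching the
      * GTP-C and GTP-U TEID words.
      */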
8269 static enum i40e_status_code
8270 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8271 {
8272         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8273         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8274         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8275         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8276         enum i40e_status_code status = I40E_SUCCESS;
8277
8278         if (pf->support_multi_driver) {
8279                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8280                 return I40E_NOT_SUPPORTED;
8281         }
8282
8283         /* For GTP-C */
8284         memset(&filter_replace, 0,
8285                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8286         memset(&filter_replace_buf, 0,
8287                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8288         /* create L1 filter */
8289         filter_replace.old_filter_type =
8290                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8291         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8292         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8293                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8294         /* Prepare the buffer, 2 entries */
8295         filter_replace_buf.data[0] =
8296                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8297         filter_replace_buf.data[0] |=
8298                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8299         filter_replace_buf.data[2] = 0xFF;
8300         filter_replace_buf.data[3] = 0xFF;
8301         filter_replace_buf.data[4] =
8302                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8303         filter_replace_buf.data[4] |=
8304                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8305         filter_replace_buf.data[6] = 0xFF;
8306         filter_replace_buf.data[7] = 0xFF;
8307         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8308                                                &filter_replace_buf);
8309         if (status < 0)
8310                 return status;
8311         if (filter_replace.old_filter_type !=
8312             filter_replace.new_filter_type)
8313                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8314                             " original: 0x%x, new: 0x%x",
8315                             dev->device->name,
8316                             filter_replace.old_filter_type,
8317                             filter_replace.new_filter_type);
8318
8319         /* for GTP-U */
8320         memset(&filter_replace, 0,
8321                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8322         memset(&filter_replace_buf, 0,
8323                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8324         /* create L1 filter */
8325         filter_replace.old_filter_type =
8326                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8327         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8328         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8329                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8330         /* Prepare the buffer, 2 entries */
8331         filter_replace_buf.data[0] =
8332                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8333         filter_replace_buf.data[0] |=
8334                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8335         filter_replace_buf.data[2] = 0xFF;
8336         filter_replace_buf.data[3] = 0xFF;
8337         filter_replace_buf.data[4] =
8338                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8339         filter_replace_buf.data[4] |=
8340                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8341         filter_replace_buf.data[6] = 0xFF;
8342         filter_replace_buf.data[7] = 0xFF;
8343
8344         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8345                                                &filter_replace_buf);
8346         if (!status && (filter_replace.old_filter_type !=
8347                         filter_replace.new_filter_type))
8348                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8349                             " original: 0x%x, new: 0x%x",
8350                             dev->device->name,
8351                             filter_replace.old_filter_type,
8352                             filter_replace.new_filter_type);
8353
8354         return status;
8355 }
8356
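     /* Replace the default cloud filters with customized ones for GTP-C
      * and GTP-U.
      */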
8357 static enum i40e_status_code
8358 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8359 {
8360         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8361         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8362         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8363         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8364         enum i40e_status_code status = I40E_SUCCESS;
8365
8366         if (pf->support_multi_driver) {
8367                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8368                 return I40E_NOT_SUPPORTED;
8369         }
8370
8371         /* for GTP-C */
8372         memset(&filter_replace, 0,
8373                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8374         memset(&filter_replace_buf, 0,
8375                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8376         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8377         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8378         filter_replace.new_filter_type =
8379                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8380         /* Prepare the buffer, 2 entries */
8381         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8382         filter_replace_buf.data[0] |=
8383                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8384         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8385         filter_replace_buf.data[4] |=
8386                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8387         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8388                                                &filter_replace_buf);
8389         if (status < 0)
8390                 return status;
8391         if (filter_replace.old_filter_type !=
8392             filter_replace.new_filter_type)
8393                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8394                             " original: 0x%x, new: 0x%x",
8395                             dev->device->name,
8396                             filter_replace.old_filter_type,
8397                             filter_replace.new_filter_type);
8398
8399         /* for GTP-U */
8400         memset(&filter_replace, 0,
8401                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8402         memset(&filter_replace_buf, 0,
8403                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8404         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8405         filter_replace.old_filter_type =
8406                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8407         filter_replace.new_filter_type =
8408                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8409         /* Prepare the buffer, 2 entries */
8410         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8411         filter_replace_buf.data[0] |=
8412                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8413         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8414         filter_replace_buf.data[4] |=
8415                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8416
8417         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8418                                                &filter_replace_buf);
8419         if (!status && (filter_replace.old_filter_type !=
8420                         filter_replace.new_filter_type))
8421                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8422                             " original: 0x%x, new: 0x%x",
8423                             dev->device->name,
8424                             filter_replace.old_filter_type,
8425                             filter_replace.new_filter_type);
8426
8427         return status;
8428 }
8429
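     /* Replace the default L1 filter with a customized one matching an
      * L4 source or destination port, depending on l4_port_type.
      */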
8430 static enum i40e_status_code
8431 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8432                             enum i40e_l4_port_type l4_port_type)
8433 {
8434         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8435         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8436         enum i40e_status_code status = I40E_SUCCESS;
8437         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8438         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8439
8440         if (pf->support_multi_driver) {
8441                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8442                 return I40E_NOT_SUPPORTED;
8443         }
8444
8445         memset(&filter_replace, 0,
8446                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8447         memset(&filter_replace_buf, 0,
8448                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8449
8450         /* create L1 filter */
8451         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8452                 filter_replace.old_filter_type =
8453                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8454                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8455                 filter_replace_buf.data[8] =
8456                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8457         } else {
8458                 filter_replace.old_filter_type =
8459                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8460                 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8461                 filter_replace_buf.data[8] =
8462                         I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8463         }
8464
8465         filter_replace.tr_bit = 0;
8466         /* Prepare the buffer, 3 entries */
8467         filter_replace_buf.data[0] =
8468                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8469         filter_replace_buf.data[0] |=
8470                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8471         filter_replace_buf.data[2] = 0x00;
8472         filter_replace_buf.data[3] =
8473                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8474         filter_replace_buf.data[4] =
8475                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8476         filter_replace_buf.data[4] |=
8477                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8478         filter_replace_buf.data[5] = 0x00;
8479         filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8480                 I40E_TR_L4_TYPE_TCP |
8481                 I40E_TR_L4_TYPE_SCTP;
8482         filter_replace_buf.data[7] = 0x00;
8483         filter_replace_buf.data[8] |=
8484                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8485         filter_replace_buf.data[9] = 0x00;
8486         filter_replace_buf.data[10] = 0xFF;
8487         filter_replace_buf.data[11] = 0xFF;
8488
8489         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8490                                                &filter_replace_buf);
8491         if (!status && filter_replace.old_filter_type !=
8492             filter_replace.new_filter_type)
8493                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8494                             " original: 0x%x, new: 0x%x",
8495                             dev->device->name,
8496                             filter_replace.old_filter_type,
8497                             filter_replace.new_filter_type);
8498
8499         return status;
8500 }
8501
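     /* Replace the default cloud filter with a customized one matching
      * an L4 source or destination port, depending on l4_port_type.
      */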
8502 static enum i40e_status_code
8503 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8504                                enum i40e_l4_port_type l4_port_type)
8505 {
8506         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8507         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8508         enum i40e_status_code status = I40E_SUCCESS;
8509         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8510         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8511
8512         if (pf->support_multi_driver) {
8513                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8514                 return I40E_NOT_SUPPORTED;
8515         }
8516
8517         memset(&filter_replace, 0,
8518                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8519         memset(&filter_replace_buf, 0,
8520                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8521
8522         if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8523                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8524                 filter_replace.new_filter_type =
8525                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8526                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8527         } else {
8528                 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8529                 filter_replace.new_filter_type =
8530                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8531                 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8532         }
8533
8534         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8535         filter_replace.tr_bit = 0;
8536         /* Prepare the buffer, 2 entries */
8537         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8538         filter_replace_buf.data[0] |=
8539                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8540         filter_replace_buf.data[4] |=
8541                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8542         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8543                                                &filter_replace_buf);
8544
8545         if (!status && filter_replace.old_filter_type !=
8546             filter_replace.new_filter_type)
8547                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8548                             " original: 0x%x, new: 0x%x",
8549                             dev->device->name,
8550                             filter_replace.old_filter_type,
8551                             filter_replace.new_filter_type);
8552
8553         return status;
8554 }
8555
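     /* Same as i40e_dev_tunnel_filter_set() but takes the extended tunnel
      * filter representation, which also covers MPLS, GTP, QinQ and L4
      * port filters and can target a VF VSI. Big-buffer cloud filters are
      * used for the tunnel types that rely on replaced filter types.
      */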
8556 int
8557 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8558                       struct i40e_tunnel_filter_conf *tunnel_filter,
8559                       uint8_t add)
8560 {
8561         uint16_t ip_type;
8562         uint32_t ipv4_addr, ipv4_addr_le;
8563         uint8_t i, tun_type = 0;
8564         /* internal variable to convert ipv6 byte order */
8565         uint32_t convert_ipv6[4];
8566         int val, ret = 0;
8567         struct i40e_pf_vf *vf = NULL;
8568         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8569         struct i40e_vsi *vsi;
8570         struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8571         struct i40e_aqc_cloud_filters_element_bb *pfilter;
8572         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8573         struct i40e_tunnel_filter *tunnel, *node;
8574         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8575         uint32_t teid_le;
8576         bool big_buffer = 0;
8577
8578         cld_filter = rte_zmalloc("tunnel_filter",
8579                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8580                          0);
8581
8582         if (cld_filter == NULL) {
8583                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8584                 return -ENOMEM;
8585         }
8586         pfilter = cld_filter;
8587
8588         rte_ether_addr_copy(&tunnel_filter->outer_mac,
8589                         (struct rte_ether_addr *)&pfilter->element.outer_mac);
8590         rte_ether_addr_copy(&tunnel_filter->inner_mac,
8591                         (struct rte_ether_addr *)&pfilter->element.inner_mac);
8592
8593         pfilter->element.inner_vlan =
8594                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8595         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8596                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8597                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8598                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8599                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
8600                                 &ipv4_addr_le,
8601                                 sizeof(pfilter->element.ipaddr.v4.data));
8602         } else {
8603                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8604                 for (i = 0; i < 4; i++) {
8605                         convert_ipv6[i] =
8606                         rte_cpu_to_le_32(rte_be_to_cpu_32(
8607                                          tunnel_filter->ip_addr.ipv6_addr[i]));
8608                 }
8609                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
8610                            &convert_ipv6,
8611                            sizeof(pfilter->element.ipaddr.v6.data));
8612         }
8613
8614         /* check tunneled type */
8615         switch (tunnel_filter->tunnel_type) {
8616         case I40E_TUNNEL_TYPE_VXLAN:
8617                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8618                 break;
8619         case I40E_TUNNEL_TYPE_NVGRE:
8620                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8621                 break;
8622         case I40E_TUNNEL_TYPE_IP_IN_GRE:
8623                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8624                 break;
8625         case I40E_TUNNEL_TYPE_MPLSoUDP:
8626                 if (!pf->mpls_replace_flag) {
8627                         i40e_replace_mpls_l1_filter(pf);
8628                         i40e_replace_mpls_cloud_filter(pf);
8629                         pf->mpls_replace_flag = 1;
8630                 }
8631                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8632                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8633                         teid_le >> 4;
8634                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8635                         (teid_le & 0xF) << 12;
8636                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8637                         0x40;
8638                 big_buffer = 1;
8639                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8640                 break;
8641         case I40E_TUNNEL_TYPE_MPLSoGRE:
8642                 if (!pf->mpls_replace_flag) {
8643                         i40e_replace_mpls_l1_filter(pf);
8644                         i40e_replace_mpls_cloud_filter(pf);
8645                         pf->mpls_replace_flag = 1;
8646                 }
8647                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8648                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8649                         teid_le >> 4;
8650                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8651                         (teid_le & 0xF) << 12;
8652                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8653                         0x0;
8654                 big_buffer = 1;
8655                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8656                 break;
8657         case I40E_TUNNEL_TYPE_GTPC:
8658                 if (!pf->gtp_replace_flag) {
8659                         i40e_replace_gtp_l1_filter(pf);
8660                         i40e_replace_gtp_cloud_filter(pf);
8661                         pf->gtp_replace_flag = 1;
8662                 }
8663                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8664                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8665                         (teid_le >> 16) & 0xFFFF;
8666                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8667                         teid_le & 0xFFFF;
8668                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8669                         0x0;
8670                 big_buffer = 1;
8671                 break;
8672         case I40E_TUNNEL_TYPE_GTPU:
8673                 if (!pf->gtp_replace_flag) {
8674                         i40e_replace_gtp_l1_filter(pf);
8675                         i40e_replace_gtp_cloud_filter(pf);
8676                         pf->gtp_replace_flag = 1;
8677                 }
8678                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8679                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8680                         (teid_le >> 16) & 0xFFFF;
8681                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8682                         teid_le & 0xFFFF;
8683                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8684                         0x0;
8685                 big_buffer = 1;
8686                 break;
8687         case I40E_TUNNEL_TYPE_QINQ:
8688                 if (!pf->qinq_replace_flag) {
8689                         ret = i40e_cloud_filter_qinq_create(pf);
8690                         if (ret < 0)
8691                                 PMD_DRV_LOG(DEBUG,
8692                                             "QinQ tunnel filter already created.");
8693                         pf->qinq_replace_flag = 1;
8694                 }
8695                 /* Add the values of the outer and inner VLAN to the
8696                  * general fields of the filter.
8697                  * The big buffer flag must be set; see the changes in
8698                  * i40e_aq_add_cloud_filters.
8699                  */
8700                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8701                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8702                 big_buffer = 1;
8703                 break;
8704         case I40E_CLOUD_TYPE_UDP:
8705         case I40E_CLOUD_TYPE_TCP:
8706         case I40E_CLOUD_TYPE_SCTP:
8707                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8708                         if (!pf->sport_replace_flag) {
8709                                 i40e_replace_port_l1_filter(pf,
8710                                                 tunnel_filter->l4_port_type);
8711                                 i40e_replace_port_cloud_filter(pf,
8712                                                 tunnel_filter->l4_port_type);
8713                                 pf->sport_replace_flag = 1;
8714                         }
8715                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8716                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8717                                 I40E_DIRECTION_INGRESS_KEY;
8718
8719                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8720                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8721                                         I40E_TR_L4_TYPE_UDP;
8722                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8723                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8724                                         I40E_TR_L4_TYPE_TCP;
8725                         else
8726                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8727                                         I40E_TR_L4_TYPE_SCTP;
8728
8729                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8730                                 (teid_le >> 16) & 0xFFFF;
8731                         big_buffer = 1;
8732                 } else {
8733                         if (!pf->dport_replace_flag) {
8734                                 i40e_replace_port_l1_filter(pf,
8735                                                 tunnel_filter->l4_port_type);
8736                                 i40e_replace_port_cloud_filter(pf,
8737                                                 tunnel_filter->l4_port_type);
8738                                 pf->dport_replace_flag = 1;
8739                         }
8740                         teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8741                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8742                                 I40E_DIRECTION_INGRESS_KEY;
8743
8744                         if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8745                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8746                                         I40E_TR_L4_TYPE_UDP;
8747                         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8748                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8749                                         I40E_TR_L4_TYPE_TCP;
8750                         else
8751                                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8752                                         I40E_TR_L4_TYPE_SCTP;
8753
8754                         pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8755                                 (teid_le >> 16) & 0xFFFF;
8756                         big_buffer = 1;
8757                 }
8758
8759                 break;
8760         default:
8761                 /* Other tunnel types are not supported. */
8762                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
8763                 rte_free(cld_filter);
8764                 return -EINVAL;
8765         }
8766
8767         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8768                 pfilter->element.flags =
8769                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8770         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8771                 pfilter->element.flags =
8772                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8773         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8774                 pfilter->element.flags =
8775                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
8776         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8777                 pfilter->element.flags =
8778                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
8779         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8780                 pfilter->element.flags |=
8781                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
8782         else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8783                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8784                  tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8785                 if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8786                         pfilter->element.flags |=
8787                                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8788                 else
8789                         pfilter->element.flags |=
8790                                 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8791         } else {
8792                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8793                                                 &pfilter->element.flags);
8794                 if (val < 0) {
8795                         rte_free(cld_filter);
8796                         return -EINVAL;
8797                 }
8798         }
8799
8800         pfilter->element.flags |= rte_cpu_to_le_16(
8801                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8802                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8803         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8804         pfilter->element.queue_number =
8805                 rte_cpu_to_le_16(tunnel_filter->queue_id);
8806
8807         if (!tunnel_filter->is_to_vf) {
8808                 vsi = pf->main_vsi;
8809         } else {
8810                 if (tunnel_filter->vf_id >= pf->vf_num) {
8811                         PMD_DRV_LOG(ERR, "Invalid argument.");
8812                         rte_free(cld_filter);
8813                         return -EINVAL;
8814                 }
8815                 vf = &pf->vfs[tunnel_filter->vf_id];
8816                 vsi = vf->vsi;
8817         }
8818
8819         /* Check if the filter already exists in the SW list */
8820         memset(&check_filter, 0, sizeof(check_filter));
8821         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8822         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8823         check_filter.vf_id = tunnel_filter->vf_id;
8824         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8825         if (add && node) {
8826                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8827                 rte_free(cld_filter);
8828                 return -EINVAL;
8829         }
8830
8831         if (!add && !node) {
8832                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8833                 rte_free(cld_filter);
8834                 return -EINVAL;
8835         }
8836
8837         if (add) {
8838                 if (big_buffer)
8839                         ret = i40e_aq_add_cloud_filters_bb(hw,
8840                                                    vsi->seid, cld_filter, 1);
8841                 else
8842                         ret = i40e_aq_add_cloud_filters(hw,
8843                                         vsi->seid, &cld_filter->element, 1);
8844                 if (ret < 0) {
8845                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8846                         rte_free(cld_filter);
8847                         return -ENOTSUP;
8848                 }
8849                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8850                 if (tunnel == NULL) {
8851                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8852                         rte_free(cld_filter);
8853                         return -ENOMEM;
8854                 }
8855
8856                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8857                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8858                 if (ret < 0)
8859                         rte_free(tunnel);
8860         } else {
8861                 if (big_buffer)
8862                         ret = i40e_aq_rem_cloud_filters_bb(
8863                                 hw, vsi->seid, cld_filter, 1);
8864                 else
8865                         ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8866                                                 &cld_filter->element, 1);
8867                 if (ret < 0) {
8868                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8869                         rte_free(cld_filter);
8870                         return -ENOTSUP;
8871                 }
8872                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8873         }
8874
8875         rte_free(cld_filter);
8876         return ret;
8877 }
8878
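     /* Return the index of the given UDP port in the VXLAN port table,
      * or -1 if it is not found.
      */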
8879 static int
8880 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8881 {
8882         uint8_t i;
8883
8884         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8885                 if (pf->vxlan_ports[i] == port)
8886                         return i;
8887         }
8888
8889         return -1;
8890 }
8891
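     /* Add a UDP tunnel port: find a free slot, program the port via the
      * admin queue and mark the slot in the bitmap.
      */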
8892 static int
8893 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8894 {
8895         int  idx, ret;
8896         uint8_t filter_idx = 0;
8897         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8898
8899         idx = i40e_get_vxlan_port_idx(pf, port);
8900
8901         /* Check if port already exists */
8902         if (idx >= 0) {
8903                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8904                 return -EINVAL;
8905         }
8906
8907         /* Now check if there is space to add the new port */
8908         idx = i40e_get_vxlan_port_idx(pf, 0);
8909         if (idx < 0) {
8910                 PMD_DRV_LOG(ERR,
8911                         "Maximum number of UDP ports reached, not adding port %d",
8912                         port);
8913                 return -ENOSPC;
8914         }
8915
8916         ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8917                                         &filter_idx, NULL);
8918         if (ret < 0) {
8919                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8920                 return -1;
8921         }
8922
8923         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
8924                          port, filter_idx);
8925
8926         /* New port: add it and mark its index in the bitmap */
8927         pf->vxlan_ports[idx] = port;
8928         pf->vxlan_bitmap |= (1 << idx);
8929
8930         if (!(pf->flags & I40E_FLAG_VXLAN))
8931                 pf->flags |= I40E_FLAG_VXLAN;
8932
8933         return 0;
8934 }
8935
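     /* Delete a UDP tunnel port: remove the hardware filter by its index
      * and clear the slot and its bitmap bit.
      */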
8936 static int
8937 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8938 {
8939         int idx;
8940         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8941
8942         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8943                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8944                 return -EINVAL;
8945         }
8946
8947         idx = i40e_get_vxlan_port_idx(pf, port);
8948
8949         if (idx < 0) {
8950                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8951                 return -EINVAL;
8952         }
8953
8954         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8955                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8956                 return -1;
8957         }
8958
8959         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
8960                         port, idx);
8961
8962         pf->vxlan_ports[idx] = 0;
8963         pf->vxlan_bitmap &= ~(1 << idx);
8964
8965         if (!pf->vxlan_bitmap)
8966                 pf->flags &= ~I40E_FLAG_VXLAN;
8967
8968         return 0;
8969 }
8970
8971 /* Add UDP tunneling port */
8972 static int
8973 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8974                              struct rte_eth_udp_tunnel *udp_tunnel)
8975 {
8976         int ret = 0;
8977         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8978
8979         if (udp_tunnel == NULL)
8980                 return -EINVAL;
8981
8982         switch (udp_tunnel->prot_type) {
8983         case RTE_TUNNEL_TYPE_VXLAN:
8984                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8985                                           I40E_AQC_TUNNEL_TYPE_VXLAN);
8986                 break;
8987         case RTE_TUNNEL_TYPE_VXLAN_GPE:
8988                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8989                                           I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8990                 break;
8991         case RTE_TUNNEL_TYPE_GENEVE:
8992         case RTE_TUNNEL_TYPE_TEREDO:
8993                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8994                 ret = -1;
8995                 break;
8996
8997         default:
8998                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8999                 ret = -1;
9000                 break;
9001         }
9002
9003         return ret;
9004 }
9005
9006 /* Remove UDP tunneling port */
9007 static int
9008 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
9009                              struct rte_eth_udp_tunnel *udp_tunnel)
9010 {
9011         int ret = 0;
9012         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9013
9014         if (udp_tunnel == NULL)
9015                 return -EINVAL;
9016
9017         switch (udp_tunnel->prot_type) {
9018         case RTE_TUNNEL_TYPE_VXLAN:
9019         case RTE_TUNNEL_TYPE_VXLAN_GPE:
9020                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
9021                 break;
9022         case RTE_TUNNEL_TYPE_GENEVE:
9023         case RTE_TUNNEL_TYPE_TEREDO:
9024                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
9025                 ret = -1;
9026                 break;
9027         default:
9028                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
9029                 ret = -1;
9030                 break;
9031         }
9032
9033         return ret;
9034 }
9035
9036 /* Calculate the maximum number of contiguous PF queues that are configured */
9037 static int
9038 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
9039 {
9040         struct rte_eth_dev_data *data = pf->dev_data;
9041         int i, num;
9042         struct i40e_rx_queue *rxq;
9043
9044         num = 0;
9045         for (i = 0; i < pf->lan_nb_qps; i++) {
9046                 rxq = data->rx_queues[i];
9047                 if (rxq && rxq->q_set)
9048                         num++;
9049                 else
9050                         break;
9051         }
9052
9053         return num;
9054 }
9055
9056 /* Configure RSS: fill the LUT over the configured queues and set the hash key */
9057 static int
9058 i40e_pf_config_rss(struct i40e_pf *pf)
9059 {
9060         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
9061         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9062         struct rte_eth_rss_conf rss_conf;
9063         uint32_t i, lut = 0;
9064         uint16_t j, num;
9065
9066         /*
9067          * If both VMDQ and RSS are enabled, not all PF queues are configured.
9068          * It's necessary to calculate the number of PF queues actually in use.
9069          */
9070         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
9071                 num = i40e_pf_calc_configured_queues_num(pf);
9072         else
9073                 num = pf->dev_data->nb_rx_queues;
9074
9075         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
9076         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
9077                         num);
9078
9079         if (num == 0) {
9080                 PMD_INIT_LOG(ERR,
9081                         "No PF queues are configured to enable RSS for port %u",
9082                         pf->dev_data->port_id);
9083                 return -ENOTSUP;
9084         }
9085
9086         if (pf->adapter->rss_reta_updated == 0) {
9087                 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
9088                         if (j == num)
9089                                 j = 0;
9090                         lut = (lut << 8) | (j & ((0x1 <<
9091                                 hw->func_caps.rss_table_entry_width) - 1));
9092                         if ((i & 3) == 3)
9093                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
9094                                                rte_bswap32(lut));
9095                 }
9096         }
9097
9098         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
9099         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0 ||
9100             !(mq_mode & ETH_MQ_RX_RSS_FLAG)) {
9101                 i40e_pf_disable_rss(pf);
9102                 return 0;
9103         }
9104         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
9105                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
9106                 /* Random default keys */
9107                 static uint32_t rss_key_default[] = {0x6b793944,
9108                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
9109                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
9110                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
9111
9112                 rss_conf.rss_key = (uint8_t *)rss_key_default;
9113                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
9114                                                         sizeof(uint32_t);
9115         }
9116
9117         return i40e_hw_rss_hash_set(pf, &rss_conf);
9118 }
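
/*
 * Sketch (for illustration only) of the RSS LUT packing performed above:
 * four 8-bit queue indices are shifted into one 32-bit word, and every
 * fourth entry the word is flushed to a LUT slot; the driver additionally
 * masks j to rss_table_entry_width bits and applies rte_bswap32() before
 * the register write. lut_words[] is a stand-in for I40E_PFQF_HLUT().
 */
static void
fill_lut_words(uint32_t *lut_words, uint16_t table_size, uint16_t num)
{
        uint32_t lut = 0;
        uint16_t i, j;

        for (i = 0, j = 0; i < table_size; i++, j++) {
                if (j == num)
                        j = 0;          /* wrap around the queue count */
                lut = (lut << 8) | j;   /* pack one queue index per byte */
                if ((i & 3) == 3)       /* one 32-bit word per 4 entries */
                        lut_words[i >> 2] = lut;
        }
}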
9119
9120 static int
9121 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
9122                                struct rte_eth_tunnel_filter_conf *filter)
9123 {
9124         if (pf == NULL || filter == NULL) {
9125                 PMD_DRV_LOG(ERR, "Invalid parameter");
9126                 return -EINVAL;
9127         }
9128
9129         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
9130                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9131                 return -EINVAL;
9132         }
9133
9134         if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
9135                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
9136                 return -EINVAL;
9137         }
9138
9139         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
9140                 (rte_is_zero_ether_addr(&filter->outer_mac))) {
9141                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
9142                 return -EINVAL;
9143         }
9144
9145         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
9146                 (rte_is_zero_ether_addr(&filter->inner_mac))) {
9147                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
9148                 return -EINVAL;
9149         }
9150
9151         return 0;
9152 }
9153
9154 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
9155 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
9156 int
9157 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
9158 {
9159         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9160         uint32_t val, reg;
9161         int ret = -EINVAL;
9162
9163         if (pf->support_multi_driver) {
9164                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
9165                 return -ENOTSUP;
9166         }
9167
9168         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
9169         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
9170
9171         if (len == 3) {
9172                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
9173         } else if (len == 4) {
9174                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
9175         } else {
9176                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
9177                 return ret;
9178         }
9179
9180         if (reg != val) {
9181                 ret = i40e_aq_debug_write_global_register(hw,
9182                                                    I40E_GL_PRS_FVBM(2),
9183                                                    reg, NULL);
9184                 if (ret != 0)
9185                         return ret;
9186                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
9187                             "with value 0x%08x",
9188                             I40E_GL_PRS_FVBM(2), reg);
9189         } else {
9190                 ret = 0;
9191         }
9192         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
9193                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
9194
9195         return ret;
9196 }
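
/*
 * Pure model, for illustration: the only register change made above is
 * flipping the MSK_ENA bit in GL_PRS_FVBM(2). gre_key_len_to_fvbm() is a
 * hypothetical helper showing that mapping; lengths other than 3 or 4
 * leave the value untouched (the driver rejects them with -EINVAL).
 */
static uint32_t
gre_key_len_to_fvbm(uint32_t fvbm, uint8_t len)
{
        if (len == 3)
                return fvbm | I40E_GL_PRS_FVBM_MSK_ENA;  /* 3-byte key */
        if (len == 4)
                return fvbm & ~I40E_GL_PRS_FVBM_MSK_ENA; /* 4-byte key */

        return fvbm; /* unsupported length: no change */
}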
9197
9198 static int
9199 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
9200 {
9201         int ret = -EINVAL;
9202
9203         if (!hw || !cfg)
9204                 return -EINVAL;
9205
9206         switch (cfg->cfg_type) {
9207         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
9208                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
9209                 break;
9210         default:
9211                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
9212                 break;
9213         }
9214
9215         return ret;
9216 }
9217
9218 static int
9219 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
9220                                enum rte_filter_op filter_op,
9221                                void *arg)
9222 {
9223         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9224         int ret = I40E_ERR_PARAM;
9225
9226         switch (filter_op) {
9227         case RTE_ETH_FILTER_SET:
9228                 ret = i40e_dev_global_config_set(hw,
9229                         (struct rte_eth_global_cfg *)arg);
9230                 break;
9231         default:
9232                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
9233                 break;
9234         }
9235
9236         return ret;
9237 }
9238
9239 static int
9240 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
9241                           enum rte_filter_op filter_op,
9242                           void *arg)
9243 {
9244         struct rte_eth_tunnel_filter_conf *filter;
9245         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9246         int ret = I40E_SUCCESS;
9247
9248         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
9249
9250         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
9251                 return I40E_ERR_PARAM;
9252
9253         switch (filter_op) {
9254         case RTE_ETH_FILTER_NOP:
9255                 if (!(pf->flags & I40E_FLAG_VXLAN))
9256                         ret = I40E_NOT_SUPPORTED;
9257                 break;
9258         case RTE_ETH_FILTER_ADD:
9259                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
9260                 break;
9261         case RTE_ETH_FILTER_DELETE:
9262                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
9263                 break;
9264         default:
9265                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
9266                 ret = I40E_ERR_PARAM;
9267                 break;
9268         }
9269
9270         return ret;
9271 }
9272
9273 /* Get the symmetric hash enable configurations per port */
9274 static void
9275 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
9276 {
9277         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9278
9279         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
9280 }
9281
9282 /* Set the symmetric hash enable configurations per port */
9283 static void
9284 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
9285 {
9286         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
9287
9288         if (enable > 0) {
9289                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
9290                         PMD_DRV_LOG(INFO,
9291                                 "Symmetric hash has already been enabled");
9292                         return;
9293                 }
9294                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9295         } else {
9296                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
9297                         PMD_DRV_LOG(INFO,
9298                                 "Symmetric hash has already been disabled");
9299                         return;
9300                 }
9301                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
9302         }
9303         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
9304         I40E_WRITE_FLUSH(hw);
9305 }
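
/*
 * Hypothetical standalone model of the toggle above: the HSYM bit of
 * PRTQF_CTL_0 is set or cleared, and *write_needed mirrors the early
 * returns ("already enabled/disabled") that skip the register write.
 */
static uint32_t
hsym_apply(uint32_t ctl0, uint8_t enable, int *write_needed)
{
        uint32_t next;

        if (enable > 0)
                next = ctl0 | I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
        else
                next = ctl0 & ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;

        *write_needed = (next != ctl0);
        return next;
}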
9306
9307 /*
9308  * Get global configurations of hash function type and symmetric hash enable
9309  * per flow type (pctype). Note that a global configuration affects all
9310  * the ports on the same NIC.
9311  */
9312 static int
9313 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
9314                                    struct rte_eth_hash_global_conf *g_cfg)
9315 {
9316         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9317         uint32_t reg;
9318         uint16_t i, j;
9319
9320         memset(g_cfg, 0, sizeof(*g_cfg));
9321         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9322         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
9323                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
9324         else
9325                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
9326         PMD_DRV_LOG(DEBUG, "Hash function is %s",
9327                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
9328
9329         /*
9330          * As i40e supports fewer than 64 flow types, only the first 64 bits
9331          * need to be checked.
9332          */
9333         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9334                 g_cfg->valid_bit_mask[i] = 0ULL;
9335                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
9336         }
9337
9338         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
9339
9340         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
9341                 if (!adapter->pctypes_tbl[i])
9342                         continue;
9343                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9344                      j < I40E_FILTER_PCTYPE_MAX; j++) {
9345                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
9346                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
9347                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
9348                                         g_cfg->sym_hash_enable_mask[0] |=
9349                                                                 (1ULL << i);
9350                                 }
9351                         }
9352                 }
9353         }
9354
9355         return 0;
9356 }
9357
9358 static int
9359 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
9360                               const struct rte_eth_hash_global_conf *g_cfg)
9361 {
9362         uint32_t i;
9363         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
9364
9365         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
9366                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
9367                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
9368                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
9369                                                 g_cfg->hash_func);
9370                 return -EINVAL;
9371         }
9372
9373         /*
9374          * As i40e supports fewer than 64 flow types, only the first 64 bits
9375          * need to be checked.
9376          */
9377         mask0 = g_cfg->valid_bit_mask[0];
9378         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
9379                 if (i == 0) {
9380                         /* Check if any unsupported flow type configured */
9381                         if ((mask0 | i40e_mask) ^ i40e_mask)
9382                                 goto mask_err;
9383                 } else {
9384                         if (g_cfg->valid_bit_mask[i])
9385                                 goto mask_err;
9386                 }
9387         }
9388
9389         return 0;
9390
9391 mask_err:
9392         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
9393
9394         return -EINVAL;
9395 }
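
/*
 * Note on the check above: (mask0 | i40e_mask) ^ i40e_mask is non-zero
 * exactly when mask0 carries a bit that i40e_mask lacks, i.e. it equals
 * mask0 & ~i40e_mask. A tiny illustrative form of the same test:
 */
static int
has_unsupported_bits(uint64_t requested, uint64_t supported)
{
        /* e.g. requested 0x6 vs supported 0x2 leaves bit 0x4 -> rejected */
        return (requested & ~supported) != 0;
}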
9396
9397 /*
9398  * Set global configurations of hash function type and symmetric hash enable
9399  * per flow type (pctype). Note that modifying the global configuration will
9400  * affect all the ports on the same NIC.
9401  */
9402 static int
9403 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
9404                                    struct rte_eth_hash_global_conf *g_cfg)
9405 {
9406         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9407         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9408         int ret;
9409         uint16_t i, j;
9410         uint32_t reg;
9411         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
9412
9413         if (pf->support_multi_driver) {
9414                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
9415                 return -ENOTSUP;
9416         }
9417
9418         /* Check the input parameters */
9419         ret = i40e_hash_global_config_check(adapter, g_cfg);
9420         if (ret < 0)
9421                 return ret;
9422
9423         /*
9424          * As i40e supports fewer than 64 flow types, only the first 64 bits
9425          * need to be configured.
9426          */
9427         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
9428                 if (mask0 & (1UL << i)) {
9429                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
9430                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
9431
9432                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9433                              j < I40E_FILTER_PCTYPE_MAX; j++) {
9434                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
9435                                         i40e_write_global_rx_ctl(hw,
9436                                                           I40E_GLQF_HSYM(j),
9437                                                           reg);
9438                         }
9439                 }
9440         }
9441
9442         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9443         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
9444                 /* Toeplitz */
9445                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
9446                         PMD_DRV_LOG(DEBUG,
9447                                 "Hash function already set to Toeplitz");
9448                         goto out;
9449                 }
9450                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
9451         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
9452                 /* Simple XOR */
9453                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
9454                         PMD_DRV_LOG(DEBUG,
9455                                 "Hash function already set to Simple XOR");
9456                         goto out;
9457                 }
9458                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
9459         } else
9460                 /* Use the default, and keep it as it is */
9461                 goto out;
9462
9463         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
9464
9465 out:
9466         I40E_WRITE_FLUSH(hw);
9467
9468         return 0;
9469 }
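
/*
 * Illustrative pure model of the GLQF_CTL update above: Toeplitz sets
 * the HTOEP bit, Simple XOR clears it, and the DEFAULT hash function
 * keeps whatever is currently programmed. No register access here.
 */
static uint32_t
glqf_ctl_for_hash_func(uint32_t ctl, enum rte_eth_hash_function f)
{
        if (f == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
                return ctl | I40E_GLQF_CTL_HTOEP_MASK;
        if (f == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
                return ctl & ~I40E_GLQF_CTL_HTOEP_MASK;

        return ctl; /* RTE_ETH_HASH_FUNCTION_DEFAULT: leave as-is */
}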
9470
9471 /**
9472  * Valid input sets for hash and flow director filters per PCTYPE
9473  */
9474 static uint64_t
9475 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9476                 enum rte_filter_type filter)
9477 {
9478         uint64_t valid;
9479
9480         static const uint64_t valid_hash_inset_table[] = {
9481                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9482                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9483                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9484                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9485                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9486                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9487                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9488                         I40E_INSET_FLEX_PAYLOAD,
9489                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9490                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9491                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9492                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9493                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9494                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9495                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9496                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9497                         I40E_INSET_FLEX_PAYLOAD,
9498                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9499                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9500                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9501                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9502                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9503                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9504                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9505                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9506                         I40E_INSET_FLEX_PAYLOAD,
9507                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9508                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9509                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9510                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9511                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9512                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9513                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9514                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9515                         I40E_INSET_FLEX_PAYLOAD,
9516                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9517                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9518                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9519                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9520                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9521                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9522                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9523                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9524                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9525                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9526                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9527                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9528                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9529                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9530                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9531                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9532                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9533                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9534                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9535                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9536                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9537                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9538                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9539                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9540                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9541                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9542                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9543                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9544                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9545                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9546                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9547                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9548                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9549                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9550                         I40E_INSET_FLEX_PAYLOAD,
9551                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9552                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9553                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9554                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9555                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9556                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9557                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9558                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9559                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9560                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9561                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9562                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9563                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9564                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9565                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9566                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9567                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9568                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9569                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9570                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9571                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9572                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9573                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9574                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9575                         I40E_INSET_FLEX_PAYLOAD,
9576                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9577                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9578                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9579                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9580                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9581                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9582                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9583                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9584                         I40E_INSET_FLEX_PAYLOAD,
9585                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9586                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9587                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9588                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9589                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9590                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9591                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9592                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9593                         I40E_INSET_FLEX_PAYLOAD,
9594                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9595                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9596                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9597                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9598                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9599                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9600                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9601                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9602                         I40E_INSET_FLEX_PAYLOAD,
9603                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9604                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9605                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9606                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9607                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9608                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9609                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9610                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9611                         I40E_INSET_FLEX_PAYLOAD,
9612                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9613                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9614                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9615                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9616                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9617                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9618                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9619                         I40E_INSET_FLEX_PAYLOAD,
9620                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9621                         I40E_INSET_DMAC | I40E_INSET_SMAC |
9622                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9623                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9624                         I40E_INSET_FLEX_PAYLOAD,
9625         };
9626
9627         /**
9628          * Flow director supports only fields defined in
9629          * union rte_eth_fdir_flow.
9630          */
9631         static const uint64_t valid_fdir_inset_table[] = {
9632                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9633                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9634                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9635                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9636                 I40E_INSET_IPV4_TTL,
9637                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9638                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9639                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9640                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9641                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9642                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9643                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9644                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9645                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9646                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9647                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9648                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9649                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9650                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9651                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9652                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9653                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9654                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9655                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9656                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9657                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9658                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9659                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9660                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9661                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9662                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9663                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9664                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9665                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9666                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9667                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9668                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9669                 I40E_INSET_SCTP_VT,
9670                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9671                 I40E_INSET_DMAC | I40E_INSET_SMAC |
9672                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9673                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9674                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9675                 I40E_INSET_IPV4_TTL,
9676                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9677                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9678                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9679                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9680                 I40E_INSET_IPV6_HOP_LIMIT,
9681                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9682                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9683                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9684                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9685                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9686                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9687                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9688                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9689                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9690                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9691                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9692                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9693                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9694                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9695                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9696                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9697                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9698                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9699                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9700                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9701                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9702                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9703                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9704                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9705                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9706                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9707                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9708                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9709                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9710                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9711                 I40E_INSET_SCTP_VT,
9712                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9713                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9714                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9715                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9716                 I40E_INSET_IPV6_HOP_LIMIT,
9717                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9718                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9719                 I40E_INSET_LAST_ETHER_TYPE,
9720         };
9721
9722         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9723                 return 0;
9724         if (filter == RTE_ETH_FILTER_HASH)
9725                 valid = valid_hash_inset_table[pctype];
9726         else
9727                 valid = valid_fdir_inset_table[pctype];
9728
9729         return valid;
9730 }
9731
9732 /**
9733  * Validate if the input set is allowed for a specific PCTYPE
9734  */
9735 int
9736 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9737                 enum rte_filter_type filter, uint64_t inset)
9738 {
9739         uint64_t valid;
9740
9741         valid = i40e_get_valid_input_set(pctype, filter);
9742         if (inset & (~valid))
9743                 return -EINVAL;
9744
9745         return 0;
9746 }
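
/*
 * Hypothetical usage sketch: a request passes only when every bit lies
 * inside the valid set for the PCTYPE/filter pair, since inset & ~valid
 * flags any extra field. Requesting the IPv4 address pair for a hash
 * filter on NONF_IPV4_TCP succeeds, as both bits appear in
 * valid_hash_inset_table[] above.
 */
static int
example_validate_ipv4_pair(void)
{
        return i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
                                       RTE_ETH_FILTER_HASH,
                                       I40E_INSET_IPV4_SRC |
                                       I40E_INSET_IPV4_DST); /* 0 */
}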
9747
9748 /* default input set fields combination per pctype */
9749 uint64_t
9750 i40e_get_default_input_set(uint16_t pctype)
9751 {
9752         static const uint64_t default_inset_table[] = {
9753                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9754                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9755                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9756                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9757                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9758                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9759                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9760                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9761                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9762                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9763                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9764                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9765                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9766                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9767                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9768                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9769                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9770                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9771                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9772                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9773                         I40E_INSET_SCTP_VT,
9774                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9775                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9776                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9777                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9778                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9779                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9780                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9781                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9782                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9783                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9784                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9785                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9786                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9787                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9788                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9789                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9790                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9791                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9792                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9793                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9794                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9795                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9796                         I40E_INSET_SCTP_VT,
9797                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9798                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9799                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9800                         I40E_INSET_LAST_ETHER_TYPE,
9801         };
9802
9803         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9804                 return 0;
9805
9806         return default_inset_table[pctype];
9807 }
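
/*
 * Illustration: PCTYPEs beyond L2_PAYLOAD fall through to 0 (no fields);
 * in-range PCTYPEs index the table directly. For NONF_IPV4_UDP the
 * default is the classic 4-tuple, as a hypothetical caller would see:
 */
static uint64_t
example_default_udp4(void)
{
        /* equals I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
         * I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT
         */
        return i40e_get_default_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
}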
9808
9809 /**
9810  * Parse the input set from index to logical bit masks
9811  */
9812 static int
9813 i40e_parse_input_set(uint64_t *inset,
9814                      enum i40e_filter_pctype pctype,
9815                      enum rte_eth_input_set_field *field,
9816                      uint16_t size)
9817 {
9818         uint16_t i, j;
9819         int ret = -EINVAL;
9820
9821         static const struct {
9822                 enum rte_eth_input_set_field field;
9823                 uint64_t inset;
9824         } inset_convert_table[] = {
9825                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9826                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9827                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9828                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9829                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9830                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9831                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9832                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9833                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9834                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9835                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9836                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9837                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9838                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9839                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9840                         I40E_INSET_IPV6_NEXT_HDR},
9841                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9842                         I40E_INSET_IPV6_HOP_LIMIT},
9843                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9844                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9845                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9846                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9847                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9848                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9849                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9850                         I40E_INSET_SCTP_VT},
9851                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9852                         I40E_INSET_TUNNEL_DMAC},
9853                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9854                         I40E_INSET_VLAN_TUNNEL},
9855                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9856                         I40E_INSET_TUNNEL_ID},
9857                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9858                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9859                         I40E_INSET_FLEX_PAYLOAD_W1},
9860                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9861                         I40E_INSET_FLEX_PAYLOAD_W2},
9862                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9863                         I40E_INSET_FLEX_PAYLOAD_W3},
9864                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9865                         I40E_INSET_FLEX_PAYLOAD_W4},
9866                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9867                         I40E_INSET_FLEX_PAYLOAD_W5},
9868                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9869                         I40E_INSET_FLEX_PAYLOAD_W6},
9870                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9871                         I40E_INSET_FLEX_PAYLOAD_W7},
9872                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9873                         I40E_INSET_FLEX_PAYLOAD_W8},
9874         };
9875
9876         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9877                 return ret;
9878
9879         /* Only one item allowed for default or all */
9880         if (size == 1) {
9881                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9882                         *inset = i40e_get_default_input_set(pctype);
9883                         return 0;
9884                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9885                         *inset = I40E_INSET_NONE;
9886                         return 0;
9887                 }
9888         }
9889
9890         for (i = 0, *inset = 0; i < size; i++) {
9891                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9892                         if (field[i] == inset_convert_table[j].field) {
9893                                 *inset |= inset_convert_table[j].inset;
9894                                 break;
9895                         }
9896                 }
9897
9898                 /* It contains unsupported input set, return immediately */
9899                 if (j == RTE_DIM(inset_convert_table))
9900                         return ret;
9901         }
9902
9903         return 0;
9904 }
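
/*
 * Hypothetical usage sketch for the parser above: two field selectors
 * are translated through inset_convert_table[] into the driver's logical
 * bitmask. A single RTE_ETH_INPUT_SET_DEFAULT entry would instead yield
 * the per-PCTYPE default set.
 */
static int
example_parse_l3_pair(uint64_t *inset)
{
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L3_DST_IP4,
        };

        /* on success *inset == I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST */
        return i40e_parse_input_set(inset, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
                                    fields, RTE_DIM(fields));
}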
9905
9906 /**
9907  * Translate the input set from bit masks to register aware bit masks
9908  * and vice versa
9909  */
9910 uint64_t
9911 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9912 {
9913         uint64_t val = 0;
9914         uint16_t i;
9915
9916         struct inset_map {
9917                 uint64_t inset;
9918                 uint64_t inset_reg;
9919         };
9920
9921         static const struct inset_map inset_map_common[] = {
9922                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9923                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9924                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9925                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9926                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9927                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9928                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9929                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9930                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9931                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9932                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9933                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9934                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9935                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9936                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9937                 {I40E_INSET_TUNNEL_DMAC,
9938                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9939                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9940                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9941                 {I40E_INSET_TUNNEL_SRC_PORT,
9942                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9943                 {I40E_INSET_TUNNEL_DST_PORT,
9944                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9945                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9946                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9947                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9948                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9949                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9950                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9951                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9952                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9953                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9954         };
9955
9956         /* Some registers are mapped differently on the X722 */
9957         static const struct inset_map inset_map_diff_x722[] = {
9958                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9959                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9960                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9961                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9962         };
9963
9964         static const struct inset_map inset_map_diff_not_x722[] = {
9965                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9966                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9967                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9968                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9969         };
9970
9971         if (input == 0)
9972                 return val;
9973
9974         /* Translate input set to register aware inset */
9975         if (type == I40E_MAC_X722) {
9976                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9977                         if (input & inset_map_diff_x722[i].inset)
9978                                 val |= inset_map_diff_x722[i].inset_reg;
9979                 }
9980         } else {
9981                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9982                         if (input & inset_map_diff_not_x722[i].inset)
9983                                 val |= inset_map_diff_not_x722[i].inset_reg;
9984                 }
9985         }
9986
9987         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9988                 if (input & inset_map_common[i].inset)
9989                         val |= inset_map_common[i].inset_reg;
9990         }
9991
9992         return val;
9993 }
9994
9995 int
9996 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9997 {
9998         uint8_t i, idx = 0;
9999         uint64_t inset_need_mask = inset;
10000
10001         static const struct {
10002                 uint64_t inset;
10003                 uint32_t mask;
10004         } inset_mask_map[] = {
10005                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
10006                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
10007                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
10008                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
10009                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
10010                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
10011                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
10012                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
10013         };
10014
10015         if (!inset || !mask || !nb_elem)
10016                 return 0;
10017
10018         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
10019                 /* Clear the inset bit if no mask is required,
10020                  * for example proto + ttl.
10021                  */
10022                 if ((inset & inset_mask_map[i].inset) ==
10023                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
10024                         inset_need_mask &= ~inset_mask_map[i].inset;
10025                 if (!inset_need_mask)
10026                         return 0;
10027         }
10028         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
10029                 if ((inset_need_mask & inset_mask_map[i].inset) ==
10030                     inset_mask_map[i].inset) {
10031                         if (idx >= nb_elem) {
10032                                 PMD_DRV_LOG(ERR, "exceeded the maximum number of bitmasks");
10033                                 return -EINVAL;
10034                         }
10035                         mask[idx] = inset_mask_map[i].mask;
10036                         idx++;
10037                 }
10038         }
10039
10040         return idx;
10041 }
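
/*
 * Sketch of the two passes above, with a hypothetical call: pairs that
 * need no hardware mask (e.g. IPV4_PROTO together with IPV4_TTL) are
 * removed first, then one mask value is emitted per remaining field,
 * bounded by nb_elem.
 */
static int
example_gen_tos_mask(uint32_t *mask, uint8_t nb_elem)
{
        /* returns 1 (with nb_elem >= 1): IPV4_TOS needs a mask entry,
         * IPV4_SRC does not
         */
        return i40e_generate_inset_mask_reg(I40E_INSET_IPV4_TOS |
                                            I40E_INSET_IPV4_SRC,
                                            mask, nb_elem);
}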
10042
10043 void
10044 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
10045 {
10046         uint32_t reg = i40e_read_rx_ctl(hw, addr);
10047
10048         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
10049         if (reg != val)
10050                 i40e_write_rx_ctl(hw, addr, val);
10051         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
10052                     (uint32_t)i40e_read_rx_ctl(hw, addr));
10053 }
10054
10055 void
10056 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
10057 {
10058         uint32_t reg = i40e_read_rx_ctl(hw, addr);
10059         struct rte_eth_dev *dev;
10060
10061         dev = ((struct i40e_adapter *)hw->back)->eth_dev;
10062         if (reg != val) {
10063                 i40e_write_rx_ctl(hw, addr, val);
10064                 PMD_DRV_LOG(WARNING,
10065                             "i40e device %s changed global register [0x%08x]."
10066                             " original: 0x%08x, new: 0x%08x",
10067                             dev->device->name, addr, reg,
10068                             (uint32_t)i40e_read_rx_ctl(hw, addr));
10069         }
10070 }
10071
10072 static void
10073 i40e_filter_input_set_init(struct i40e_pf *pf)
10074 {
10075         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10076         enum i40e_filter_pctype pctype;
10077         uint64_t input_set, inset_reg;
10078         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10079         int num, i;
10080         uint16_t flow_type;
10081
10082         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
10083              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
10084                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
10085
10086                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
10087                         continue;
10088
10089                 input_set = i40e_get_default_input_set(pctype);
10090
10091                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10092                                                    I40E_INSET_MASK_NUM_REG);
10093                 if (num < 0)
10094                         return;
10095                 if (pf->support_multi_driver && num > 0) {
10096                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
10097                         return;
10098                 }
10099                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
10100                                         input_set);
10101
10102                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
10103                                       (uint32_t)(inset_reg & UINT32_MAX));
10104                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
10105                                      (uint32_t)((inset_reg >>
10106                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
10107                 if (!pf->support_multi_driver) {
10108                         i40e_check_write_global_reg(hw,
10109                                             I40E_GLQF_HASH_INSET(0, pctype),
10110                                             (uint32_t)(inset_reg & UINT32_MAX));
10111                         i40e_check_write_global_reg(hw,
10112                                              I40E_GLQF_HASH_INSET(1, pctype),
10113                                              (uint32_t)((inset_reg >>
10114                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
10115
10116                         for (i = 0; i < num; i++) {
10117                                 i40e_check_write_global_reg(hw,
10118                                                     I40E_GLQF_FD_MSK(i, pctype),
10119                                                     mask_reg[i]);
10120                                 i40e_check_write_global_reg(hw,
10121                                                   I40E_GLQF_HASH_MSK(i, pctype),
10122                                                   mask_reg[i]);
10123                         }
10124                         /* Clear unused mask registers of the pctype */
10125                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
10126                                 i40e_check_write_global_reg(hw,
10127                                                     I40E_GLQF_FD_MSK(i, pctype),
10128                                                     0);
10129                                 i40e_check_write_global_reg(hw,
10130                                                   I40E_GLQF_HASH_MSK(i, pctype),
10131                                                   0);
10132                         }
10133                 } else {
10134                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
10135                 }
10136                 I40E_WRITE_FLUSH(hw);
10137
10138                 /* store the default input set */
10139                 if (!pf->support_multi_driver)
10140                         pf->hash_input_set[pctype] = input_set;
10141                 pf->fdir.input_set[pctype] = input_set;
10142         }
10143 }
10144
10145 int
10146 i40e_hash_filter_inset_select(struct i40e_hw *hw,
10147                          struct rte_eth_input_set_conf *conf)
10148 {
10149         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
10150         enum i40e_filter_pctype pctype;
10151         uint64_t input_set, inset_reg = 0;
10152         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10153         int ret, i, num;
10154
10155         if (!conf) {
10156                 PMD_DRV_LOG(ERR, "Invalid pointer");
10157                 return -EFAULT;
10158         }
10159         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10160             conf->op != RTE_ETH_INPUT_SET_ADD) {
10161                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10162                 return -EINVAL;
10163         }
10164
10165         if (pf->support_multi_driver) {
10166                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
10167                 return -ENOTSUP;
10168         }
10169
10170         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10171         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10172                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
10173                 return -EINVAL;
10174         }
10175
10176         if (hw->mac.type == I40E_MAC_X722) {
10177                 /* get translated pctype value in fd pctype register */
10178                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
10179                         I40E_GLQF_FD_PCTYPES((int)pctype));
10180         }
10181
10182         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10183                                    conf->inset_size);
10184         if (ret) {
10185                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10186                 return -EINVAL;
10187         }
10188
10189         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
10190                 /* get inset value in register */
10191                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
10192                 inset_reg <<= I40E_32_BIT_WIDTH;
10193                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
10194                 input_set |= pf->hash_input_set[pctype];
10195         }
10196         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10197                                            I40E_INSET_MASK_NUM_REG);
10198         if (num < 0)
10199                 return -EINVAL;
10200
10201         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10202
10203         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
10204                                     (uint32_t)(inset_reg & UINT32_MAX));
10205         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
10206                                     (uint32_t)((inset_reg >>
10207                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
10208
10209         for (i = 0; i < num; i++)
10210                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10211                                             mask_reg[i]);
10212         /* Clear unused mask registers of the pctype */
10213         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10214                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
10215                                             0);
10216         I40E_WRITE_FLUSH(hw);
10217
10218         pf->hash_input_set[pctype] = input_set;
10219         return 0;
10220 }
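
/*
 * The two supported operations above reduce to simple set arithmetic,
 * shown here as an illustrative helper: SELECT replaces the stored
 * input set, while ADD ORs the newly parsed fields into it.
 */
static uint64_t
apply_inset_op(uint64_t current, uint64_t requested, int is_add)
{
        return is_add ? (current | requested) : requested;
}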
10221
10222 int
10223 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
10224                          struct rte_eth_input_set_conf *conf)
10225 {
10226         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10227         enum i40e_filter_pctype pctype;
10228         uint64_t input_set, inset_reg = 0;
10229         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
10230         int ret, i, num;
10231
10232         if (!hw || !conf) {
10233                 PMD_DRV_LOG(ERR, "Invalid pointer");
10234                 return -EFAULT;
10235         }
10236         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
10237             conf->op != RTE_ETH_INPUT_SET_ADD) {
10238                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
10239                 return -EINVAL;
10240         }
10241
10242         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
10243
10244         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
10245                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
10246                 return -EINVAL;
10247         }
10248
10249         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
10250                                    conf->inset_size);
10251         if (ret) {
10252                 PMD_DRV_LOG(ERR, "Failed to parse input set");
10253                 return -EINVAL;
10254         }
10255
10256         /* get inset value in register */
10257         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
10258         inset_reg <<= I40E_32_BIT_WIDTH;
10259         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
10260
10261         /* Can not change the inset reg for flex payload for fdir,
10262          * it is done by writing I40E_PRTQF_FD_FLXINSET
10263          * in i40e_set_flex_mask_on_pctype.
10264          */
10265         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
10266                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
10267         else
10268                 input_set |= pf->fdir.input_set[pctype];
10269         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
10270                                            I40E_INSET_MASK_NUM_REG);
10271         if (num < 0)
10272                 return -EINVAL;
10273         if (pf->support_multi_driver && num > 0) {
10274                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10275                 return -ENOTSUP;
10276         }
10277
10278         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
10279
10280         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
10281                               (uint32_t)(inset_reg & UINT32_MAX));
10282         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
10283                              (uint32_t)((inset_reg >>
10284                              I40E_32_BIT_WIDTH) & UINT32_MAX));
10285
10286         if (!pf->support_multi_driver) {
10287                 for (i = 0; i < num; i++)
10288                         i40e_check_write_global_reg(hw,
10289                                                     I40E_GLQF_FD_MSK(i, pctype),
10290                                                     mask_reg[i]);
10291                 /* Clear unused mask registers of the pctype */
10292                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
10293                         i40e_check_write_global_reg(hw,
10294                                                     I40E_GLQF_FD_MSK(i, pctype),
10295                                                     0);
10296         } else {
10297                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
10298         }
10299         I40E_WRITE_FLUSH(hw);
10300
10301         pf->fdir.input_set[pctype] = input_set;
10302         return 0;
10303 }
10304
10305 static int
10306 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10307 {
10308         int ret = 0;
10309
10310         if (!hw || !info) {
10311                 PMD_DRV_LOG(ERR, "Invalid pointer");
10312                 return -EFAULT;
10313         }
10314
10315         switch (info->info_type) {
10316         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10317                 i40e_get_symmetric_hash_enable_per_port(hw,
10318                                         &(info->info.enable));
10319                 break;
10320         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10321                 ret = i40e_get_hash_filter_global_config(hw,
10322                                 &(info->info.global_conf));
10323                 break;
10324         default:
10325                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10326                                                         info->info_type);
10327                 ret = -EINVAL;
10328                 break;
10329         }
10330
10331         return ret;
10332 }
10333
10334 static int
10335 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
10336 {
10337         int ret = 0;
10338
10339         if (!hw || !info) {
10340                 PMD_DRV_LOG(ERR, "Invalid pointer");
10341                 return -EFAULT;
10342         }
10343
10344         switch (info->info_type) {
10345         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
10346                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
10347                 break;
10348         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
10349                 ret = i40e_set_hash_filter_global_config(hw,
10350                                 &(info->info.global_conf));
10351                 break;
10352         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
10353                 ret = i40e_hash_filter_inset_select(hw,
10354                                                &(info->info.input_set_conf));
10355                 break;
10356
10357         default:
10358                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
10359                                                         info->info_type);
10360                 ret = -EINVAL;
10361                 break;
10362         }
10363
10364         return ret;
10365 }
10366
10367 /* Operations for hash function */
10368 static int
10369 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
10370                       enum rte_filter_op filter_op,
10371                       void *arg)
10372 {
10373         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10374         int ret = 0;
10375
10376         switch (filter_op) {
10377         case RTE_ETH_FILTER_NOP:
10378                 break;
10379         case RTE_ETH_FILTER_GET:
10380                 ret = i40e_hash_filter_get(hw,
10381                         (struct rte_eth_hash_filter_info *)arg);
10382                 break;
10383         case RTE_ETH_FILTER_SET:
10384                 ret = i40e_hash_filter_set(hw,
10385                         (struct rte_eth_hash_filter_info *)arg);
10386                 break;
10387         default:
10388                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
10389                                                                 filter_op);
10390                 ret = -ENOTSUP;
10391                 break;
10392         }
10393
10394         return ret;
10395 }
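/*
 * Illustrative sketch (not part of the upstream driver): applications
 * reach the handler above through the legacy filter API, e.g. to enable
 * symmetric hashing on a port. The function name is hypothetical and
 * error handling is omitted.
 */
#if 0
static int
example_enable_symmetric_hash(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
        info.info.enable = 1;
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}
#endif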
10396
10397 /* Convert ethertype filter structure */
10398 static int
10399 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
10400                               struct i40e_ethertype_filter *filter)
10401 {
10402         rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
10403                 RTE_ETHER_ADDR_LEN);
10404         filter->input.ether_type = input->ether_type;
10405         filter->flags = input->flags;
10406         filter->queue = input->queue;
10407
10408         return 0;
10409 }
10410
10411 /* Check if the ethertype filter already exists */
10412 struct i40e_ethertype_filter *
10413 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
10414                                 const struct i40e_ethertype_filter_input *input)
10415 {
10416         int ret;
10417
10418         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
10419         if (ret < 0)
10420                 return NULL;
10421
10422         return ethertype_rule->hash_map[ret];
10423 }
10424
10425 /* Add ethertype filter in SW list */
10426 static int
10427 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
10428                                 struct i40e_ethertype_filter *filter)
10429 {
10430         struct i40e_ethertype_rule *rule = &pf->ethertype;
10431         int ret;
10432
10433         ret = rte_hash_add_key(rule->hash_table, &filter->input);
10434         if (ret < 0) {
10435                 PMD_DRV_LOG(ERR,
10436                             "Failed to insert ethertype filter"
10437                             " into hash table, error %d!",
10438                             ret);
10439                 return ret;
10440         }
10441         rule->hash_map[ret] = filter;
10442
10443         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
10444
10445         return 0;
10446 }
10447
10448 /* Delete ethertype filter in SW list */
10449 int
10450 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
10451                              struct i40e_ethertype_filter_input *input)
10452 {
10453         struct i40e_ethertype_rule *rule = &pf->ethertype;
10454         struct i40e_ethertype_filter *filter;
10455         int ret;
10456
10457         ret = rte_hash_del_key(rule->hash_table, input);
10458         if (ret < 0) {
10459                 PMD_DRV_LOG(ERR,
10460                             "Failed to delete ethertype filter"
10461                             " from hash table, error %d!",
10462                             ret);
10463                 return ret;
10464         }
10465         filter = rule->hash_map[ret];
10466         rule->hash_map[ret] = NULL;
10467
10468         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
10469         rte_free(filter);
10470
10471         return 0;
10472 }
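/*
 * Illustrative sketch (not part of the upstream driver): rte_hash_add_key()
 * and rte_hash_del_key() return the key position on success, and the code
 * above uses that position as the index into the hash_map[] shadow array.
 * The helper below shows the pattern in isolation; its name is hypothetical.
 */
#if 0
static int
example_hash_map_insert(struct rte_hash *h, const void *key, void *value,
                        void **hash_map)
{
        int pos = rte_hash_add_key(h, key);

        if (pos < 0)
                return pos;
        hash_map[pos] = value;  /* del returns the same position later */
        return 0;
}
#endif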
10473
10474 /*
10475  * Configure an ethertype filter, which can direct packets by filtering
10476  * on MAC address and ether_type, or on ether_type alone
10477  */
10478 int
10479 i40e_ethertype_filter_set(struct i40e_pf *pf,
10480                         struct rte_eth_ethertype_filter *filter,
10481                         bool add)
10482 {
10483         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10484         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
10485         struct i40e_ethertype_filter *ethertype_filter, *node;
10486         struct i40e_ethertype_filter check_filter;
10487         struct i40e_control_filter_stats stats;
10488         uint16_t flags = 0;
10489         int ret;
10490
10491         if (filter->queue >= pf->dev_data->nb_rx_queues) {
10492                 PMD_DRV_LOG(ERR, "Invalid queue ID");
10493                 return -EINVAL;
10494         }
10495         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
10496                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
10497                 PMD_DRV_LOG(ERR,
10498                         "unsupported ether_type(0x%04x) in control packet filter.",
10499                         filter->ether_type);
10500                 return -EINVAL;
10501         }
10502         if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
10503                 PMD_DRV_LOG(WARNING,
10504                         "filtering vlan ether_type in the first tag is not supported.");
10505
10506         /* Check if there is the filter in SW list */
10507         memset(&check_filter, 0, sizeof(check_filter));
10508         i40e_ethertype_filter_convert(filter, &check_filter);
10509         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
10510                                                &check_filter.input);
10511         if (add && node) {
10512                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
10513                 return -EINVAL;
10514         }
10515
10516         if (!add && !node) {
10517                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
10518                 return -EINVAL;
10519         }
10520
10521         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
10522                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10523         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
10524                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10525         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10526
10527         memset(&stats, 0, sizeof(stats));
10528         ret = i40e_aq_add_rem_control_packet_filter(hw,
10529                         filter->mac_addr.addr_bytes,
10530                         filter->ether_type, flags,
10531                         pf->main_vsi->seid,
10532                         filter->queue, add, &stats, NULL);
10533
10534         PMD_DRV_LOG(INFO,
10535                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
10536                 ret, stats.mac_etype_used, stats.etype_used,
10537                 stats.mac_etype_free, stats.etype_free);
10538         if (ret < 0)
10539                 return -ENOSYS;
10540
10541         /* Add or delete a filter in SW list */
10542         if (add) {
10543                 ethertype_filter = rte_zmalloc("ethertype_filter",
10544                                        sizeof(*ethertype_filter), 0);
10545                 if (ethertype_filter == NULL) {
10546                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
10547                         return -ENOMEM;
10548                 }
10549
10550                 rte_memcpy(ethertype_filter, &check_filter,
10551                            sizeof(check_filter));
10552                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
10553                 if (ret < 0)
10554                         rte_free(ethertype_filter);
10555         } else {
10556                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
10557         }
10558
10559         return ret;
10560 }
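/*
 * Illustrative sketch (not part of the upstream driver): a minimal filter
 * that steers LLDP frames (ether type 0x88CC) to queue 0, built the way
 * i40e_ethertype_filter_set() expects. The values are examples only; note
 * that IPv4/IPv6 ether types are rejected above.
 */
#if 0
static void
example_build_ethertype_filter(struct rte_eth_ethertype_filter *f)
{
        memset(f, 0, sizeof(*f));
        f->ether_type = 0x88CC; /* LLDP */
        f->flags = 0;           /* no MAC match, no drop: steer to queue */
        f->queue = 0;
}
#endif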
10561
10562 /*
10563  * Handle operations for ethertype filter.
10564  */
10565 static int
10566 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
10567                                 enum rte_filter_op filter_op,
10568                                 void *arg)
10569 {
10570         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10571         int ret = 0;
10572
10573         if (filter_op == RTE_ETH_FILTER_NOP)
10574                 return ret;
10575
10576         if (arg == NULL) {
10577                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
10578                             filter_op);
10579                 return -EINVAL;
10580         }
10581
10582         switch (filter_op) {
10583         case RTE_ETH_FILTER_ADD:
10584                 ret = i40e_ethertype_filter_set(pf,
10585                         (struct rte_eth_ethertype_filter *)arg,
10586                         TRUE);
10587                 break;
10588         case RTE_ETH_FILTER_DELETE:
10589                 ret = i40e_ethertype_filter_set(pf,
10590                         (struct rte_eth_ethertype_filter *)arg,
10591                         FALSE);
10592                 break;
10593         default:
10594                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
10595                 ret = -ENOSYS;
10596                 break;
10597         }
10598         return ret;
10599 }
10600
10601 static int
10602 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
10603                      enum rte_filter_type filter_type,
10604                      enum rte_filter_op filter_op,
10605                      void *arg)
10606 {
10607         int ret = 0;
10608
10609         if (dev == NULL)
10610                 return -EINVAL;
10611
10612         switch (filter_type) {
10613         case RTE_ETH_FILTER_NONE:
10614                 /* For global configuration */
10615                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
10616                 break;
10617         case RTE_ETH_FILTER_HASH:
10618                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
10619                 break;
10620         case RTE_ETH_FILTER_MACVLAN:
10621                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
10622                 break;
10623         case RTE_ETH_FILTER_ETHERTYPE:
10624                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
10625                 break;
10626         case RTE_ETH_FILTER_TUNNEL:
10627                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
10628                 break;
10629         case RTE_ETH_FILTER_FDIR:
10630                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
10631                 break;
10632         case RTE_ETH_FILTER_GENERIC:
10633                 if (filter_op != RTE_ETH_FILTER_GET)
10634                         return -EINVAL;
10635                 *(const void **)arg = &i40e_flow_ops;
10636                 break;
10637         default:
10638                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
10639                                                         filter_type);
10640                 ret = -EINVAL;
10641                 break;
10642         }
10643
10644         return ret;
10645 }
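/*
 * Illustrative sketch (not part of the upstream driver): the GENERIC/GET
 * branch above is how the ethdev layer obtains the rte_flow ops -- the
 * handler writes a pointer to i40e_flow_ops through the arg pointer.
 */
#if 0
static const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
        const struct rte_flow_ops *ops = NULL;

        if (i40e_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                 RTE_ETH_FILTER_GET, &ops) < 0)
                return NULL;
        return ops;
}
#endif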
10646
10647 /*
10648  * Check and enable Extended Tag.
10649  * Enabling Extended Tag is important for 40G performance.
10650  */
10651 static void
10652 i40e_enable_extended_tag(struct rte_eth_dev *dev)
10653 {
10654         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10655         uint32_t buf = 0;
10656         int ret;
10657
10658         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10659                                       PCI_DEV_CAP_REG);
10660         if (ret < 0) {
10661                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10662                             PCI_DEV_CAP_REG);
10663                 return;
10664         }
10665         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
10666                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
10667                 return;
10668         }
10669
10670         buf = 0;
10671         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10672                                       PCI_DEV_CTRL_REG);
10673         if (ret < 0) {
10674                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10675                             PCI_DEV_CTRL_REG);
10676                 return;
10677         }
10678         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
10679                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
10680                 return;
10681         }
10682         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
10683         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
10684                                        PCI_DEV_CTRL_REG);
10685         if (ret < 0) {
10686                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
10687                             PCI_DEV_CTRL_REG);
10688                 return;
10689         }
10690 }
10691
10692 /*
10693  * As some registers are not reset unless a global hardware reset occurs,
10694  * hardware initialization is needed to put those registers into an
10695  * expected initial state.
10696  */
10697 static void
10698 i40e_hw_init(struct rte_eth_dev *dev)
10699 {
10700         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10701
10702         i40e_enable_extended_tag(dev);
10703
10704         /* clear the PF Queue Filter control register */
10705         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
10706
10707         /* Disable symmetric hash per port */
10708         i40e_set_symmetric_hash_enable_per_port(hw, 0);
10709 }
10710
10711 /*
10712  * For X722 it is possible to have multiple pctypes mapped to the same
10713  * flowtype; however, this function returns only the single highest pctype
10714  * index, which is not quite correct. This is a known problem of the i40e
10715  * driver and needs to be fixed later.
10716  */
10717 enum i40e_filter_pctype
10718 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
10719 {
10720         int i;
10721         uint64_t pctype_mask;
10722
10723         if (flow_type < I40E_FLOW_TYPE_MAX) {
10724                 pctype_mask = adapter->pctypes_tbl[flow_type];
10725                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10726                         if (pctype_mask & (1ULL << i))
10727                                 return (enum i40e_filter_pctype)i;
10728                 }
10729         }
10730         return I40E_FILTER_PCTYPE_INVALID;
10731 }
10732
10733 uint16_t
10734 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10735                         enum i40e_filter_pctype pctype)
10736 {
10737         uint16_t flowtype;
10738         uint64_t pctype_mask = 1ULL << pctype;
10739
10740         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10741              flowtype++) {
10742                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10743                         return flowtype;
10744         }
10745
10746         return RTE_ETH_FLOW_UNKNOWN;
10747 }
10748
10749 /*
10750  * On X710, performance numbers fall far short of expectations on recent
10751  * firmware versions. On XL710 the same is true on recent firmware if
10752  * promiscuous mode is disabled, or if promiscuous mode is enabled and the
10753  * port MAC address equals the packet destination MAC address. The fix may
10754  * not be integrated into upcoming firmware versions, so a workaround in
10755  * the software driver is needed. It modifies the initial values of 3
10756  * internal-only registers for both X710 and XL710. Note that the values
10757  * for X710 and XL710 could be different, and the workaround can be removed
10758  * when the issue is fixed in firmware in the future.
10759  */
10760
10761 /* For both X710 and XL710 */
10762 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
10763 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
10764 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
10765
10766 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10767 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10768
10769 /* For X722 */
10770 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10771 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10772
10773 /* For X710 */
10774 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10775 /* For XL710 */
10776 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10777 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10778
10779 /*
10780  * GL_SWR_PM_UP_THR:
10781  * The value is not affected by the link speed; it is set according
10782  * to the total number of ports for a better pipe-monitor configuration.
10783  */
10784 static bool
10785 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10786 {
10787 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10788                 .device_id = (dev),   \
10789                 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10790
10791 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10792                 .device_id = (dev),   \
10793                 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10794
10795         static const struct {
10796                 uint16_t device_id;
10797                 uint32_t val;
10798         } swr_pm_table[] = {
10799                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10800                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10801                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10802                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10803                 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10804
10805                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10806                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10807                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10808                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10809                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10810                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10811                 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10812         };
10813         uint32_t i;
10814
10815         if (value == NULL) {
10816                 PMD_DRV_LOG(ERR, "value is NULL");
10817                 return false;
10818         }
10819
10820         for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10821                 if (hw->device_id == swr_pm_table[i].device_id) {
10822                         *value = swr_pm_table[i].val;
10823
10824                         PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10825                                     "value - 0x%08x",
10826                                     hw->device_id, *value);
10827                         return true;
10828                 }
10829         }
10830
10831         return false;
10832 }
10833
10834 static int
10835 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10836 {
10837         enum i40e_status_code status;
10838         struct i40e_aq_get_phy_abilities_resp phy_ab;
10839         int ret = -ENOTSUP;
10840         int retries = 0;
10841
10842         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10843                                               NULL);
10844
10845         while (status) {
10846                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10847                         status);
10848                 retries++;
10849                 rte_delay_us(100000);
10850                 if (retries < 5)
10851                         status = i40e_aq_get_phy_capabilities(hw, false,
10852                                         true, &phy_ab, NULL);
10853                 else
10854                         return ret;
10855         }
10856         return 0;
10857 }
10858
10859 static void
10860 i40e_configure_registers(struct i40e_hw *hw)
10861 {
10862         static struct {
10863                 uint32_t addr;
10864                 uint64_t val;
10865         } reg_table[] = {
10866                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10867                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10868                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10869         };
10870         uint64_t reg;
10871         uint32_t i;
10872         int ret;
10873
10874         for (i = 0; i < RTE_DIM(reg_table); i++) {
10875                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10876                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10877                                 reg_table[i].val =
10878                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10879                         else /* For X710/XL710/XXV710 */
10880                                 if (hw->aq.fw_maj_ver < 6)
10881                                         reg_table[i].val =
10882                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10883                                 else
10884                                         reg_table[i].val =
10885                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10886                 }
10887
10888                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10889                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10890                                 reg_table[i].val =
10891                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10892                         else /* For X710/XL710/XXV710 */
10893                                 reg_table[i].val =
10894                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10895                 }
10896
10897                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10898                         uint32_t cfg_val;
10899
10900                         if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10901                                 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10902                                             "GL_SWR_PM_UP_THR value fixup",
10903                                             hw->device_id);
10904                                 continue;
10905                         }
10906
10907                         reg_table[i].val = cfg_val;
10908                 }
10909
10910                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10911                                                         &reg, NULL);
10912                 if (ret < 0) {
10913                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10914                                                         reg_table[i].addr);
10915                         break;
10916                 }
10917                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10918                                                 reg_table[i].addr, reg);
10919                 if (reg == reg_table[i].val)
10920                         continue;
10921
10922                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10923                                                 reg_table[i].val, NULL);
10924                 if (ret < 0) {
10925                         PMD_DRV_LOG(ERR,
10926                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10927                                 reg_table[i].val, reg_table[i].addr);
10928                         break;
10929                 }
10930                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10931                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10932         }
10933 }
10934
10935 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10936 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10937 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10938 static int
10939 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10940 {
10941         uint32_t reg;
10942         int ret;
10943
10944         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10945                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10946                 return -EINVAL;
10947         }
10948
10949         /* Configure for double VLAN RX stripping */
10950         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10951         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10952                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10953                 ret = i40e_aq_debug_write_register(hw,
10954                                                    I40E_VSI_TSR(vsi->vsi_id),
10955                                                    reg, NULL);
10956                 if (ret < 0) {
10957                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10958                                     vsi->vsi_id);
10959                         return I40E_ERR_CONFIG;
10960                 }
10961         }
10962
10963         /* Configure for double VLAN TX insertion */
10964         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10965         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10966                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10967                 ret = i40e_aq_debug_write_register(hw,
10968                                                    I40E_VSI_L2TAGSTXVALID(
10969                                                    vsi->vsi_id), reg, NULL);
10970                 if (ret < 0) {
10971                         PMD_DRV_LOG(ERR,
10972                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10973                                 vsi->vsi_id);
10974                         return I40E_ERR_CONFIG;
10975                 }
10976         }
10977
10978         return 0;
10979 }
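/*
 * Illustrative sketch (not part of the upstream driver): the QinQ setup
 * above is a read-modify-write cycle, reading the VSI register directly
 * but writing it back through the admin-queue debug write, presumably
 * because these registers are not meant to be written directly. The
 * helper name is hypothetical.
 */
#if 0
static int
example_rmw_vsi_reg(struct i40e_hw *hw, uint32_t addr, uint32_t set_bits)
{
        uint32_t reg = I40E_READ_REG(hw, addr);

        if ((reg & set_bits) == set_bits)
                return 0;       /* already configured */
        reg |= set_bits;
        return i40e_aq_debug_write_register(hw, addr, reg, NULL);
}
#endif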
10980
10981 /**
10982  * i40e_aq_add_mirror_rule
10983  * @hw: pointer to the hardware structure
10984  * @seid: VEB seid to add mirror rule to
10985  * @dst_id: destination vsi seid
10986  * @entries: Buffer which contains the entities to be mirrored
10987  * @count: number of entities contained in the buffer
10988  * @rule_id: the rule_id of the rule to be added
10989  *
10990  * Add a mirror rule for a given veb.
10991  *
10992  **/
10993 static enum i40e_status_code
10994 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10995                         uint16_t seid, uint16_t dst_id,
10996                         uint16_t rule_type, uint16_t *entries,
10997                         uint16_t count, uint16_t *rule_id)
10998 {
10999         struct i40e_aq_desc desc;
11000         struct i40e_aqc_add_delete_mirror_rule cmd;
11001         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
11002                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
11003                 &desc.params.raw;
11004         uint16_t buff_len;
11005         enum i40e_status_code status;
11006
11007         i40e_fill_default_direct_cmd_desc(&desc,
11008                                           i40e_aqc_opc_add_mirror_rule);
11009         memset(&cmd, 0, sizeof(cmd));
11010
11011         buff_len = sizeof(uint16_t) * count;
11012         desc.datalen = rte_cpu_to_le_16(buff_len);
11013         if (buff_len > 0)
11014                 desc.flags |= rte_cpu_to_le_16(
11015                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
11016         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
11017                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
11018         cmd.num_entries = rte_cpu_to_le_16(count);
11019         cmd.seid = rte_cpu_to_le_16(seid);
11020         cmd.destination = rte_cpu_to_le_16(dst_id);
11021
11022         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
11023         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
11024         PMD_DRV_LOG(INFO,
11025                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
11026                 hw->aq.asq_last_status, resp->rule_id,
11027                 resp->mirror_rules_used, resp->mirror_rules_free);
11028         *rule_id = rte_le_to_cpu_16(resp->rule_id);
11029
11030         return status;
11031 }
11032
11033 /**
11034  * i40e_aq_del_mirror_rule
11035  * @hw: pointer to the hardware structure
11036  * @seid: VEB seid to delete the mirror rule from
11037  * @entries: Buffer which contains the entities to be mirrored
11038  * @count: number of entities contained in the buffer
11039  * @rule_id: the rule_id of the rule to be deleted
11040  *
11041  * Delete a mirror rule for a given veb.
11042  *
11043  **/
11044 static enum i40e_status_code
11045 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
11046                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
11047                 uint16_t count, uint16_t rule_id)
11048 {
11049         struct i40e_aq_desc desc;
11050         struct i40e_aqc_add_delete_mirror_rule cmd;
11051         uint16_t buff_len = 0;
11052         enum i40e_status_code status;
11053         void *buff = NULL;
11054
11055         i40e_fill_default_direct_cmd_desc(&desc,
11056                                           i40e_aqc_opc_delete_mirror_rule);
11057         memset(&cmd, 0, sizeof(cmd));
11058         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
11059                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
11060                                                           I40E_AQ_FLAG_RD));
11061                 cmd.num_entries = count;
11062                 buff_len = sizeof(uint16_t) * count;
11063                 desc.datalen = rte_cpu_to_le_16(buff_len);
11064                 buff = (void *)entries;
11065         } else
11066                 /* rule id is filled in destination field for deleting mirror rule */
11067                 cmd.destination = rte_cpu_to_le_16(rule_id);
11068
11069         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
11070                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
11071         cmd.seid = rte_cpu_to_le_16(seid);
11072
11073         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
11074         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
11075
11076         return status;
11077 }
11078
11079 /**
11080  * i40e_mirror_rule_set
11081  * @dev: pointer to the device structure
11082  * @mirror_conf: mirror rule info
11083  * @sw_id: mirror rule's sw_id
11084  * @on: enable/disable
11085  *
11086  * set a mirror rule.
11087  *
11088  **/
11089 static int
11090 i40e_mirror_rule_set(struct rte_eth_dev *dev,
11091                         struct rte_eth_mirror_conf *mirror_conf,
11092                         uint8_t sw_id, uint8_t on)
11093 {
11094         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11095         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11096         struct i40e_mirror_rule *it, *mirr_rule = NULL;
11097         struct i40e_mirror_rule *parent = NULL;
11098         uint16_t seid, dst_seid, rule_id;
11099         uint16_t i, j = 0;
11100         int ret;
11101
11102         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
11103
11104         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
11105                 PMD_DRV_LOG(ERR,
11106                         "mirror rule cannot be configured without VEB or VFs.");
11107                 return -ENOSYS;
11108         }
11109         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
11110                 PMD_DRV_LOG(ERR, "mirror table is full.");
11111                 return -ENOSPC;
11112         }
11113         if (mirror_conf->dst_pool > pf->vf_num) {
11114                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
11115                                  mirror_conf->dst_pool);
11116                 return -EINVAL;
11117         }
11118
11119         seid = pf->main_vsi->veb->seid;
11120
11121         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11122                 if (sw_id <= it->index) {
11123                         mirr_rule = it;
11124                         break;
11125                 }
11126                 parent = it;
11127         }
11128         if (mirr_rule && sw_id == mirr_rule->index) {
11129                 if (on) {
11130                         PMD_DRV_LOG(ERR, "mirror rule exists.");
11131                         return -EEXIST;
11132                 } else {
11133                         ret = i40e_aq_del_mirror_rule(hw, seid,
11134                                         mirr_rule->rule_type,
11135                                         mirr_rule->entries,
11136                                         mirr_rule->num_entries, mirr_rule->id);
11137                         if (ret < 0) {
11138                                 PMD_DRV_LOG(ERR,
11139                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
11140                                         ret, hw->aq.asq_last_status);
11141                                 return -ENOSYS;
11142                         }
11143                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11144                         rte_free(mirr_rule);
11145                         pf->nb_mirror_rule--;
11146                         return 0;
11147                 }
11148         } else if (!on) {
11149                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11150                 return -ENOENT;
11151         }
11152
11153         mirr_rule = rte_zmalloc("i40e_mirror_rule",
11154                                 sizeof(struct i40e_mirror_rule), 0);
11155         if (!mirr_rule) {
11156                 PMD_DRV_LOG(ERR, "failed to allocate memory");
11157                 return I40E_ERR_NO_MEMORY;
11158         }
11159         switch (mirror_conf->rule_type) {
11160         case ETH_MIRROR_VLAN:
11161                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
11162                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
11163                                 mirr_rule->entries[j] =
11164                                         mirror_conf->vlan.vlan_id[i];
11165                                 j++;
11166                         }
11167                 }
11168                 if (j == 0) {
11169                         PMD_DRV_LOG(ERR, "vlan is not specified.");
11170                         rte_free(mirr_rule);
11171                         return -EINVAL;
11172                 }
11173                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
11174                 break;
11175         case ETH_MIRROR_VIRTUAL_POOL_UP:
11176         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
11177                 /* check if the specified pool bit is out of range */
11178                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
11179                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
11180                         rte_free(mirr_rule);
11181                         return -EINVAL;
11182                 }
11183                 for (i = 0, j = 0; i < pf->vf_num; i++) {
11184                         if (mirror_conf->pool_mask & (1ULL << i)) {
11185                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
11186                                 j++;
11187                         }
11188                 }
11189                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
11190                         /* add pf vsi to entries */
11191                         mirr_rule->entries[j] = pf->main_vsi_seid;
11192                         j++;
11193                 }
11194                 if (j == 0) {
11195                         PMD_DRV_LOG(ERR, "pool is not specified.");
11196                         rte_free(mirr_rule);
11197                         return -EINVAL;
11198                 }
11199                 /* egress and ingress in AQ commands mean from the switch, not the port */
11200                 mirr_rule->rule_type =
11201                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
11202                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
11203                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
11204                 break;
11205         case ETH_MIRROR_UPLINK_PORT:
11206                 /* egress and ingress in AQ commands mean from the switch, not the port */
11207                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
11208                 break;
11209         case ETH_MIRROR_DOWNLINK_PORT:
11210                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
11211                 break;
11212         default:
11213                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
11214                         mirror_conf->rule_type);
11215                 rte_free(mirr_rule);
11216                 return -EINVAL;
11217         }
11218
11219         /* If the dst_pool is equal to vf_num, consider it as PF */
11220         if (mirror_conf->dst_pool == pf->vf_num)
11221                 dst_seid = pf->main_vsi_seid;
11222         else
11223                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
11224
11225         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
11226                                       mirr_rule->rule_type, mirr_rule->entries,
11227                                       j, &rule_id);
11228         if (ret < 0) {
11229                 PMD_DRV_LOG(ERR,
11230                         "failed to add mirror rule: ret = %d, aq_err = %d.",
11231                         ret, hw->aq.asq_last_status);
11232                 rte_free(mirr_rule);
11233                 return -ENOSYS;
11234         }
11235
11236         mirr_rule->index = sw_id;
11237         mirr_rule->num_entries = j;
11238         mirr_rule->id = rule_id;
11239         mirr_rule->dst_vsi_seid = dst_seid;
11240
11241         if (parent)
11242                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
11243         else
11244                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
11245
11246         pf->nb_mirror_rule++;
11247         return 0;
11248 }
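/*
 * Illustrative sketch (not part of the upstream driver): an application
 * mirrors VLAN 100 traffic to pool 0 through the ethdev mirror API, which
 * lands in i40e_mirror_rule_set() above. All values are examples only.
 */
#if 0
static int
example_mirror_vlan(uint16_t port_id)
{
        struct rte_eth_mirror_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rule_type = ETH_MIRROR_VLAN;
        conf.dst_pool = 0;              /* destination pool (VF index) */
        conf.vlan.vlan_mask = 1ULL;     /* only vlan_id[0] is valid */
        conf.vlan.vlan_id[0] = 100;
        return rte_eth_mirror_rule_set(port_id, &conf, 0 /* sw_id */, 1);
}
#endif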
11249
11250 /**
11251  * i40e_mirror_rule_reset
11252  * @dev: pointer to the device
11253  * @sw_id: mirror rule's sw_id
11254  *
11255  * reset a mirror rule.
11256  *
11257  **/
11258 static int
11259 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
11260 {
11261         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11262         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11263         struct i40e_mirror_rule *it, *mirr_rule = NULL;
11264         uint16_t seid;
11265         int ret;
11266
11267         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
11268
11269         seid = pf->main_vsi->veb->seid;
11270
11271         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
11272                 if (sw_id == it->index) {
11273                         mirr_rule = it;
11274                         break;
11275                 }
11276         }
11277         if (mirr_rule) {
11278                 ret = i40e_aq_del_mirror_rule(hw, seid,
11279                                 mirr_rule->rule_type,
11280                                 mirr_rule->entries,
11281                                 mirr_rule->num_entries, mirr_rule->id);
11282                 if (ret < 0) {
11283                         PMD_DRV_LOG(ERR,
11284                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
11285                                 ret, hw->aq.asq_last_status);
11286                         return -ENOSYS;
11287                 }
11288                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
11289                 rte_free(mirr_rule);
11290                 pf->nb_mirror_rule--;
11291         } else {
11292                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
11293                 return -ENOENT;
11294         }
11295         return 0;
11296 }
11297
11298 static uint64_t
11299 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
11300 {
11301         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11302         uint64_t systim_cycles;
11303
11304         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
11305         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
11306                         << 32;
11307
11308         return systim_cycles;
11309 }
11310
11311 static uint64_t
11312 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
11313 {
11314         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11315         uint64_t rx_tstamp;
11316
11317         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
11318         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
11319                         << 32;
11320
11321         return rx_tstamp;
11322 }
11323
11324 static uint64_t
11325 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
11326 {
11327         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11328         uint64_t tx_tstamp;
11329
11330         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
11331         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
11332                         << 32;
11333
11334         return tx_tstamp;
11335 }
11336
11337 static void
11338 i40e_start_timecounters(struct rte_eth_dev *dev)
11339 {
11340         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11341         struct i40e_adapter *adapter = dev->data->dev_private;
11342         struct rte_eth_link link;
11343         uint32_t tsync_inc_l;
11344         uint32_t tsync_inc_h;
11345
11346         /* Get current link speed. */
11347         i40e_dev_link_update(dev, 1);
11348         rte_eth_linkstatus_get(dev, &link);
11349
11350         switch (link.link_speed) {
11351         case ETH_SPEED_NUM_40G:
11352         case ETH_SPEED_NUM_25G:
11353                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
11354                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
11355                 break;
11356         case ETH_SPEED_NUM_10G:
11357                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
11358                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
11359                 break;
11360         case ETH_SPEED_NUM_1G:
11361                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
11362                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
11363                 break;
11364         default:
11365                 tsync_inc_l = 0x0;
11366                 tsync_inc_h = 0x0;
11367         }
11368
11369         /* Set the timesync increment value. */
11370         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
11371         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
11372
11373         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
11374         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11375         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
11376
11377         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11378         adapter->systime_tc.cc_shift = 0;
11379         adapter->systime_tc.nsec_mask = 0;
11380
11381         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11382         adapter->rx_tstamp_tc.cc_shift = 0;
11383         adapter->rx_tstamp_tc.nsec_mask = 0;
11384
11385         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
11386         adapter->tx_tstamp_tc.cc_shift = 0;
11387         adapter->tx_tstamp_tc.nsec_mask = 0;
11388 }
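/*
 * Illustrative sketch (not part of the upstream driver): with cc_shift = 0
 * and the per-speed INCVAL programmed into PRTTSYN_INC above, one counted
 * cycle corresponds to one nanosecond, so converting a raw reading is a
 * single rte_timecounter_update() call. The helper name is hypothetical.
 */
#if 0
static uint64_t
example_systime_ns(struct rte_eth_dev *dev)
{
        struct i40e_adapter *adapter = dev->data->dev_private;
        uint64_t cycles = i40e_read_systime_cyclecounter(dev);

        /* Accumulates the cycle delta into adapter->systime_tc.nsec. */
        return rte_timecounter_update(&adapter->systime_tc, cycles);
}
#endif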
11389
11390 static int
11391 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
11392 {
11393         struct i40e_adapter *adapter = dev->data->dev_private;
11394
11395         adapter->systime_tc.nsec += delta;
11396         adapter->rx_tstamp_tc.nsec += delta;
11397         adapter->tx_tstamp_tc.nsec += delta;
11398
11399         return 0;
11400 }
11401
11402 static int
11403 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
11404 {
11405         uint64_t ns;
11406         struct i40e_adapter *adapter = dev->data->dev_private;
11407
11408         ns = rte_timespec_to_ns(ts);
11409
11410         /* Set the timecounters to a new value. */
11411         adapter->systime_tc.nsec = ns;
11412         adapter->rx_tstamp_tc.nsec = ns;
11413         adapter->tx_tstamp_tc.nsec = ns;
11414
11415         return 0;
11416 }
11417
11418 static int
11419 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
11420 {
11421         uint64_t ns, systime_cycles;
11422         struct i40e_adapter *adapter = dev->data->dev_private;
11423
11424         systime_cycles = i40e_read_systime_cyclecounter(dev);
11425         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
11426         *ts = rte_ns_to_timespec(ns);
11427
11428         return 0;
11429 }
11430
11431 static int
11432 i40e_timesync_enable(struct rte_eth_dev *dev)
11433 {
11434         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11435         uint32_t tsync_ctl_l;
11436         uint32_t tsync_ctl_h;
11437
11438         /* Stop the timesync system time. */
11439         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11440         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11441         /* Reset the timesync system time value. */
11442         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
11443         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
11444
11445         i40e_start_timecounters(dev);
11446
11447         /* Clear timesync registers. */
11448         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11449         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
11450         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
11451         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
11452         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
11453         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
11454
11455         /* Enable timestamping of PTP packets. */
11456         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11457         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
11458
11459         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11460         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
11461         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
11462
11463         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11464         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11465
11466         return 0;
11467 }
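/*
 * Illustrative sketch (not part of the upstream driver): a typical
 * application sequence exercising the timesync ops implemented here.
 * The flags/index handling is simplified and the names are examples only.
 */
#if 0
static void
example_ptp_rx(uint16_t port_id, uint32_t tstamp_index)
{
        struct timespec ts;

        rte_eth_timesync_enable(port_id);
        /* ... after a PTP packet is timestamped at tstamp_index ... */
        if (rte_eth_timesync_read_rx_timestamp(port_id, &ts,
                                               tstamp_index) == 0)
                printf("rx tstamp: %ld.%09ld\n",
                       (long)ts.tv_sec, ts.tv_nsec);
}
#endif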
11468
11469 static int
11470 i40e_timesync_disable(struct rte_eth_dev *dev)
11471 {
11472         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11473         uint32_t tsync_ctl_l;
11474         uint32_t tsync_ctl_h;
11475
11476         /* Disable timestamping of transmitted PTP packets. */
11477         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11478         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
11479
11480         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11481         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
11482
11483         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11484         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11485
11486         /* Reset the timesync increment value. */
11487         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11488         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11489
11490         return 0;
11491 }
11492
11493 static int
11494 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
11495                                 struct timespec *timestamp, uint32_t flags)
11496 {
11497         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11498         struct i40e_adapter *adapter = dev->data->dev_private;
11499         uint32_t sync_status;
11500         uint32_t index = flags & 0x03;
11501         uint64_t rx_tstamp_cycles;
11502         uint64_t ns;
11503
11504         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
11505         if ((sync_status & (1 << index)) == 0)
11506                 return -EINVAL;
11507
11508         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
11509         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
11510         *timestamp = rte_ns_to_timespec(ns);
11511
11512         return 0;
11513 }
11514
11515 static int
11516 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
11517                                 struct timespec *timestamp)
11518 {
11519         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11520         struct i40e_adapter *adapter = dev->data->dev_private;
11521         uint32_t sync_status;
11522         uint64_t tx_tstamp_cycles;
11523         uint64_t ns;
11524
11525         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11526         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
11527                 return -EINVAL;
11528
11529         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
11530         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
11531         *timestamp = rte_ns_to_timespec(ns);
11532
11533         return 0;
11534 }
11535
11536 /*
11537  * i40e_parse_dcb_configure - parse DCB configuration from user
11538  * @dev: the device being configured
11539  * @dcb_cfg: pointer to the parsed DCB configuration result
11540  * @*tc_map: bit map of enabled traffic classes
11541  *
11542  * Returns 0 on success, negative value on failure
11543  */
11544 static int
11545 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
11546                          struct i40e_dcbx_config *dcb_cfg,
11547                          uint8_t *tc_map)
11548 {
11549         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
11550         uint8_t i, tc_bw, bw_lf;
11551
11552         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
11553
11554         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
11555         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
11556                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
11557                 return -EINVAL;
11558         }
11559
11560         /* assume each tc has the same bw */
11561         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
11562         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11563                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
11564         /* distribute the remainder so the sum of tcbw equals 100 */
11565         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
11566         for (i = 0; i < bw_lf; i++)
11567                 dcb_cfg->etscfg.tcbwtable[i]++;
11568
11569         /* assume each tc has the same Transmission Selection Algorithm */
11570         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11571                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
11572
11573         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11574                 dcb_cfg->etscfg.prioritytable[i] =
11575                                 dcb_rx_conf->dcb_tc[i];
11576
11577         /* FW needs one App to configure HW */
11578         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
11579         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
11580         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
11581         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
11582
11583         if (dcb_rx_conf->nb_tcs == 0)
11584                 *tc_map = 1; /* tc0 only */
11585         else
11586                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
11587
11588         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
11589                 dcb_cfg->pfc.willing = 0;
11590                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
11591                 dcb_cfg->pfc.pfcenable = *tc_map;
11592         }
11593         return 0;
11594 }
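
/*
 * Illustrative sketch (ours, unused by the driver): the equal-bandwidth
 * split above gives each TC I40E_MAX_PERCENT / nb_tcs percent and spreads
 * the remainder over the lowest-numbered TCs, e.g. nb_tcs = 3 yields
 * 34/33/33. Assumes nb_tcs is non-zero, as the ethdev layer guarantees
 * for DCB modes.
 */
static __rte_unused void
i40e_example_split_tc_bw(uint8_t nb_tcs, uint8_t *tcbwtable)
{
        uint8_t i, tc_bw = I40E_MAX_PERCENT / nb_tcs;
        uint8_t bw_lf = I40E_MAX_PERCENT % nb_tcs;

        for (i = 0; i < nb_tcs; i++)
                tcbwtable[i] = tc_bw + (i < bw_lf ? 1 : 0);
}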
11595
11596
11597 static enum i40e_status_code
11598 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
11599                               struct i40e_aqc_vsi_properties_data *info,
11600                               uint8_t enabled_tcmap)
11601 {
11602         enum i40e_status_code ret;
11603         int i, total_tc = 0;
11604         uint16_t qpnum_per_tc, bsf, qp_idx;
11605         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
11606         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
11607         uint16_t used_queues;
11608
11609         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
11610         if (ret != I40E_SUCCESS)
11611                 return ret;
11612
11613         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11614                 if (enabled_tcmap & (1 << i))
11615                         total_tc++;
11616         }
11617         if (total_tc == 0)
11618                 total_tc = 1;
11619         vsi->enabled_tc = enabled_tcmap;
11620
11621         /* Different VSI types have different numbers of queues assigned */
11622         if (vsi->type == I40E_VSI_MAIN)
11623                 used_queues = dev_data->nb_rx_queues -
11624                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11625         else if (vsi->type == I40E_VSI_VMDQ2)
11626                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11627         else {
11628                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
11629                 return I40E_ERR_NO_AVAILABLE_VSI;
11630         }
11631
11632         /* Number of queues per enabled TC */
11633         qpnum_per_tc = used_queues / total_tc;
11634         if (qpnum_per_tc == 0) {
11635                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
11636                 return I40E_ERR_INVALID_QP_ID;
11637         }
11638         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
11639                                 I40E_MAX_Q_PER_TC);
11640         bsf = rte_bsf32(qpnum_per_tc);
11641
11642         /**
11643          * Configure TC and queue mapping parameters. For each enabled TC,
11644          * allocate qpnum_per_tc queues to that traffic class; disabled TCs
11645          * are served by the default queue.
11646          */
11647         qp_idx = 0;
11648         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11649                 if (vsi->enabled_tc & (1 << i)) {
11650                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
11651                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
11652                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
11653                         qp_idx += qpnum_per_tc;
11654                 } else
11655                         info->tc_mapping[i] = 0;
11656         }
11657
11658         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
11659         if (vsi->type == I40E_VSI_SRIOV) {
11660                 info->mapping_flags |=
11661                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
11662                 for (i = 0; i < vsi->nb_qps; i++)
11663                         info->queue_mapping[i] =
11664                                 rte_cpu_to_le_16(vsi->base_queue + i);
11665         } else {
11666                 info->mapping_flags |=
11667                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
11668                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
11669         }
11670         info->valid_sections |=
11671                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
11672
11673         return I40E_SUCCESS;
11674 }
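
/*
 * Illustrative decoder (ours, unused): unpacks one tc_mapping word produced
 * above into its queue base and queue count. The count is stored as a
 * power-of-two exponent, so nb_queue = 1 << bsf; i40e_dev_get_dcb_info()
 * performs the same extraction when reporting DCB info.
 */
static __rte_unused void
i40e_example_decode_tc_mapping(uint16_t tc_mapping_le,
                               uint16_t *base, uint16_t *nb_queue)
{
        uint16_t tc_mapping = rte_le_to_cpu_16(tc_mapping_le);
        uint16_t bsf;

        *base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
                I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
        bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
                I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
        *nb_queue = 1 << bsf;
}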
11675
11676 /*
11677  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
11678  * @veb: VEB to be configured
11679  * @tc_map: enabled TC bitmap
11680  *
11681  * Returns 0 on success, negative value on failure
11682  */
11683 static enum i40e_status_code
11684 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
11685 {
11686         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
11687         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
11688         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
11689         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
11690         enum i40e_status_code ret = I40E_SUCCESS;
11691         int i;
11692         uint32_t bw_max;
11693
11694         /* Nothing to do if the requested TC map matches the enabled one */
11695         if (veb->enabled_tc == tc_map)
11696                 return ret;
11697
11698         /* configure tc bandwidth */
11699         memset(&veb_bw, 0, sizeof(veb_bw));
11700         veb_bw.tc_valid_bits = tc_map;
11701         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11702         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11703                 if (tc_map & BIT_ULL(i))
11704                         veb_bw.tc_bw_share_credits[i] = 1;
11705         }
11706         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
11707                                                    &veb_bw, NULL);
11708         if (ret) {
11709                 PMD_INIT_LOG(ERR,
11710                         "AQ command Config switch_comp BW allocation per TC failed = %d",
11711                         hw->aq.asq_last_status);
11712                 return ret;
11713         }
11714
11715         memset(&ets_query, 0, sizeof(ets_query));
11716         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
11717                                                    &ets_query, NULL);
11718         if (ret != I40E_SUCCESS) {
11719                 PMD_DRV_LOG(ERR,
11720                         "Failed to get switch_comp ETS configuration %u",
11721                         hw->aq.asq_last_status);
11722                 return ret;
11723         }
11724         memset(&bw_query, 0, sizeof(bw_query));
11725         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
11726                                                   &bw_query, NULL);
11727         if (ret != I40E_SUCCESS) {
11728                 PMD_DRV_LOG(ERR,
11729                         "Failed to get switch_comp bandwidth configuration %u",
11730                         hw->aq.asq_last_status);
11731                 return ret;
11732         }
11733
11734         /* store and print out BW info */
11735         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
11736         veb->bw_info.bw_max = ets_query.tc_bw_max;
11737         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
11738         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
11739         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
11740                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
11741                      I40E_16_BIT_WIDTH);
11742         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11743                 veb->bw_info.bw_ets_share_credits[i] =
11744                                 bw_query.tc_bw_share_credits[i];
11745                 veb->bw_info.bw_ets_credits[i] =
11746                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11747                 /* 4 bits per TC, 4th bit is reserved */
11748                 veb->bw_info.bw_ets_max[i] =
11749                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11750                                   RTE_LEN2MASK(3, uint8_t));
11751                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11752                             veb->bw_info.bw_ets_share_credits[i]);
11753                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11754                             veb->bw_info.bw_ets_credits[i]);
11755                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11756                             veb->bw_info.bw_ets_max[i]);
11757         }
11758
11759         veb->enabled_tc = tc_map;
11760
11761         return ret;
11762 }
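
/*
 * Illustrative helper (ours, unused): the queried tc_bw_max words pack one
 * 4-bit max-credit field per TC, with the 4th bit reserved; this is the
 * per-TC extraction performed in the loop above.
 */
static __rte_unused uint8_t
i40e_example_tc_max_credits(uint32_t bw_max, int tc)
{
        return (uint8_t)((bw_max >> (tc * I40E_4_BIT_WIDTH)) &
                         RTE_LEN2MASK(3, uint8_t));
}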
11763
11764
11765 /*
11766  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11767  * @vsi: VSI to be configured
11768  * @tc_map: enabled TC bitmap
11769  *
11770  * Returns 0 on success, negative value on failure
11771  */
11772 static enum i40e_status_code
11773 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11774 {
11775         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11776         struct i40e_vsi_context ctxt;
11777         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11778         enum i40e_status_code ret = I40E_SUCCESS;
11779         int i;
11780
11781         /* Nothing to do if the requested TC map matches the enabled one */
11782         if (vsi->enabled_tc == tc_map)
11783                 return ret;
11784
11785         /* configure tc bandwidth */
11786         memset(&bw_data, 0, sizeof(bw_data));
11787         bw_data.tc_valid_bits = tc_map;
11788         /* Enable ETS TCs with equal BW Share for now across all VSIs */
11789         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11790                 if (tc_map & BIT_ULL(i))
11791                         bw_data.tc_bw_credits[i] = 1;
11792         }
11793         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11794         if (ret) {
11795                 PMD_INIT_LOG(ERR,
11796                         "AQ command Config VSI BW allocation per TC failed = %d",
11797                         hw->aq.asq_last_status);
11798                 goto out;
11799         }
11800         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11801                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11802
11803         /* Update Queue Pairs Mapping for currently enabled UPs */
11804         ctxt.seid = vsi->seid;
11805         ctxt.pf_num = hw->pf_id;
11806         ctxt.vf_num = 0;
11807         ctxt.uplink_seid = vsi->uplink_seid;
11808         ctxt.info = vsi->info;
11809         i40e_get_cap(hw);
11810         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11811         if (ret)
11812                 goto out;
11813
11814         /* Update the VSI after updating the VSI queue-mapping information */
11815         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11816         if (ret) {
11817                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11818                         hw->aq.asq_last_status);
11819                 goto out;
11820         }
11821         /* update the local VSI info with updated queue map */
11822         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11823                                         sizeof(vsi->info.tc_mapping));
11824         rte_memcpy(&vsi->info.queue_mapping,
11825                         &ctxt.info.queue_mapping,
11826                 sizeof(vsi->info.queue_mapping));
11827         vsi->info.mapping_flags = ctxt.info.mapping_flags;
11828         vsi->info.valid_sections = 0;
11829
11830         /* query and update current VSI BW information */
11831         ret = i40e_vsi_get_bw_config(vsi);
11832         if (ret) {
11833                 PMD_INIT_LOG(ERR,
11834                          "Failed updating vsi bw info, err %s aq_err %s",
11835                          i40e_stat_str(hw, ret),
11836                          i40e_aq_str(hw, hw->aq.asq_last_status));
11837                 goto out;
11838         }
11839
11840         vsi->enabled_tc = tc_map;
11841
11842 out:
11843         return ret;
11844 }
11845
11846 /*
11847  * i40e_dcb_hw_configure - program the dcb setting to hw
11848  * @pf: pf the configuration is taken on
11849  * @new_cfg: new configuration
11850  * @tc_map: enabled TC bitmap
11851  *
11852  * Returns 0 on success, negative value on failure
11853  */
11854 static enum i40e_status_code
11855 i40e_dcb_hw_configure(struct i40e_pf *pf,
11856                       struct i40e_dcbx_config *new_cfg,
11857                       uint8_t tc_map)
11858 {
11859         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11860         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11861         struct i40e_vsi *main_vsi = pf->main_vsi;
11862         struct i40e_vsi_list *vsi_list;
11863         enum i40e_status_code ret;
11864         int i;
11865         uint32_t val;
11866
11867         /* Use the FW API only if FW >= v4.4 */
11868         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11869               (hw->aq.fw_maj_ver >= 5))) {
11870                 PMD_INIT_LOG(ERR,
11871                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
11872                 return I40E_ERR_FIRMWARE_API_VERSION;
11873         }
11874
11875         /* Check whether reconfiguration is needed */
11876         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11877                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11878                 return I40E_SUCCESS;
11879         }
11880
11881         /* Copy the new config to the current config */
11882         *old_cfg = *new_cfg;
11883         old_cfg->etsrec = old_cfg->etscfg;
11884         ret = i40e_set_dcb_config(hw);
11885         if (ret) {
11886                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11887                          i40e_stat_str(hw, ret),
11888                          i40e_aq_str(hw, hw->aq.asq_last_status));
11889                 return ret;
11890         }
11891         /* set receive Arbiter to RR mode and ETS scheme by default */
11892         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11893                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11894                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11895                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11896                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11897                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11898                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11899                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11900                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11901                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11902                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11903                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11904                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11905         }
11906         /* get local mib to check whether it is configured correctly */
11907         /* IEEE mode */
11908         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11909         /* Get Local DCB Config */
11910         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11911                                      &hw->local_dcbx_config);
11912
11913         /* If a VEB has been created, update its TC configuration first */
11914         if (main_vsi->veb) {
11915                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11916                 if (ret)
11917                         PMD_INIT_LOG(WARNING,
11918                                  "Failed configuring TC for VEB seid=%d",
11919                                  main_vsi->veb->seid);
11920         }
11921         /* Update each VSI */
11922         i40e_vsi_config_tc(main_vsi, tc_map);
11923         if (main_vsi->veb) {
11924                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11925                         /* Besides the main VSI and VMDQ VSIs, only the
11926                          * default TC is enabled for the other VSIs.
11927                          */
11928                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11929                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11930                                                          tc_map);
11931                         else
11932                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11933                                                          I40E_DEFAULT_TCMAP);
11934                         if (ret)
11935                                 PMD_INIT_LOG(WARNING,
11936                                         "Failed configuring TC for VSI seid=%d",
11937                                         vsi_list->vsi->seid);
11938                         /* continue */
11939                 }
11940         }
11941         return I40E_SUCCESS;
11942 }
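
/*
 * Illustrative sketch (ours, unused): how one PRTDCB_RETSTCC value is
 * composed in the loop above -- the ETS bandwidth share for the TC plus
 * the UP-in-TC mode and ETS-TC enable bits.
 */
static __rte_unused uint32_t
i40e_example_retstcc_val(uint8_t bw_share)
{
        uint32_t val;

        val = ((uint32_t)bw_share << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
               I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
        val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
               I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
        val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
               I40E_PRTDCB_RETSTCC_ETSTC_MASK;
        return val;
}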
11943
11944 /*
11945  * i40e_dcb_init_configure - initial DCB configuration
11946  * @dev: device being configured
11947  * @sw_dcb: indicates whether DCB is configured in software or offloaded to HW
11948  *
11949  * Returns 0 on success, negative value on failure
11950  */
11951 int
11952 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11953 {
11954         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11955         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11956         int i, ret = 0;
11957
11958         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11959                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11960                 return -ENOTSUP;
11961         }
11962
11963         /* DCB initialization:
11964          * Update DCB configuration from the Firmware and configure
11965          * LLDP MIB change event.
11966          */
11967         if (sw_dcb == TRUE) {
11968                 /* Stopping LLDP is necessary for DPDK, but doing so makes
11969                  * DCB init fail: i40e_init_dcb() can only initialize DCB
11970                  * successfully while the LLDP agent is running. LLDP must
11971                  * therefore be started before DCB init and stopped again
11972                  * after initialization.
11973                  */
11974                 ret = i40e_aq_start_lldp(hw, true, NULL);
11975                 if (ret != I40E_SUCCESS)
11976                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11977
11978                 ret = i40e_init_dcb(hw, true);
11979                 /* If the LLDP agent is stopped, i40e_init_dcb() is
11980                  * expected to fail with adminq status I40E_AQ_RC_EPERM;
11981                  * otherwise it should return success.
11982                  */
11983                 if (ret == I40E_SUCCESS ||
11984                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
11985                         memset(&hw->local_dcbx_config, 0,
11986                                 sizeof(struct i40e_dcbx_config));
11987                         /* set dcb default configuration */
11988                         hw->local_dcbx_config.etscfg.willing = 0;
11989                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11990                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11991                         hw->local_dcbx_config.etscfg.tsatable[0] =
11992                                                 I40E_IEEE_TSA_ETS;
11993                         /* all UPs mapping to TC0 */
11994                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11995                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11996                         hw->local_dcbx_config.etsrec =
11997                                 hw->local_dcbx_config.etscfg;
11998                         hw->local_dcbx_config.pfc.willing = 0;
11999                         hw->local_dcbx_config.pfc.pfccap =
12000                                                 I40E_MAX_TRAFFIC_CLASS;
12001                         /* FW needs one App to configure HW */
12002                         hw->local_dcbx_config.numapps = 1;
12003                         hw->local_dcbx_config.app[0].selector =
12004                                                 I40E_APP_SEL_ETHTYPE;
12005                         hw->local_dcbx_config.app[0].priority = 3;
12006                         hw->local_dcbx_config.app[0].protocolid =
12007                                                 I40E_APP_PROTOID_FCOE;
12008                         ret = i40e_set_dcb_config(hw);
12009                         if (ret) {
12010                                 PMD_INIT_LOG(ERR,
12011                                         "default dcb config fails. err = %d, aq_err = %d.",
12012                                         ret, hw->aq.asq_last_status);
12013                                 return -ENOSYS;
12014                         }
12015                 } else {
12016                         PMD_INIT_LOG(ERR,
12017                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
12018                                 ret, hw->aq.asq_last_status);
12019                         return -ENOTSUP;
12020                 }
12021
12022                 if (i40e_need_stop_lldp(dev)) {
12023                         ret = i40e_aq_stop_lldp(hw, true, true, NULL);
12024                         if (ret != I40E_SUCCESS)
12025                                 PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
12026                 }
12027         } else {
12028                 ret = i40e_aq_start_lldp(hw, true, NULL);
12029                 if (ret != I40E_SUCCESS)
12030                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
12031
12032                 ret = i40e_init_dcb(hw, true);
12033                 if (!ret) {
12034                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
12035                                 PMD_INIT_LOG(ERR,
12036                                         "HW doesn't support DCBX offload.");
12037                                 return -ENOTSUP;
12038                         }
12039                 } else {
12040                         PMD_INIT_LOG(ERR,
12041                                 "DCBX configuration failed, err = %d, aq_err = %d.",
12042                                 ret, hw->aq.asq_last_status);
12043                         return -ENOTSUP;
12044                 }
12045         }
12046         return 0;
12047 }
12048
12049 /*
12050  * i40e_dcb_setup - set up DCB-related configuration
12051  * @dev: device being configured
12052  *
12053  * Returns 0 on success, negative value on failure
12054  */
12055 static int
12056 i40e_dcb_setup(struct rte_eth_dev *dev)
12057 {
12058         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12059         struct i40e_dcbx_config dcb_cfg;
12060         uint8_t tc_map = 0;
12061         int ret = 0;
12062
12063         if ((pf->flags & I40E_FLAG_DCB) == 0) {
12064                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
12065                 return -ENOTSUP;
12066         }
12067
12068         if (pf->vf_num != 0)
12069                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
12070
12071         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
12072         if (ret) {
12073                 PMD_INIT_LOG(ERR, "invalid dcb config");
12074                 return -EINVAL;
12075         }
12076         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
12077         if (ret) {
12078                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
12079                 return -ENOSYS;
12080         }
12081
12082         return 0;
12083 }
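
/*
 * Illustrative application-side sketch (the helper itself is ours and
 * unused; the API names are the generic ethdev ones): requesting 4 TCs with
 * every user priority mapped to TC0. i40e_dcb_setup() above then derives
 * dcb_cfg and tc_map from exactly this rx_adv_conf.
 */
static __rte_unused int
i40e_example_app_enable_dcb(uint16_t port_id)
{
        struct rte_eth_conf conf;
        int i;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
        conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = 0;

        /* one Rx/Tx queue per TC is the minimal layout for this sketch */
        return rte_eth_dev_configure(port_id, ETH_4_TCS, ETH_4_TCS, &conf);
}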
12084
12085 static int
12086 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
12087                       struct rte_eth_dcb_info *dcb_info)
12088 {
12089         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12090         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12091         struct i40e_vsi *vsi = pf->main_vsi;
12092         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
12093         uint16_t bsf, tc_mapping;
12094         int i, j = 0;
12095
12096         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
12097                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
12098         else
12099                 dcb_info->nb_tcs = 1;
12100         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
12101                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
12102         for (i = 0; i < dcb_info->nb_tcs; i++)
12103                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
12104
12105         /* get queue mapping if vmdq is disabled */
12106         if (!pf->nb_cfg_vmdq_vsi) {
12107                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12108                         if (!(vsi->enabled_tc & (1 << i)))
12109                                 continue;
12110                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
12111                         dcb_info->tc_queue.tc_rxq[j][i].base =
12112                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12113                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12114                         dcb_info->tc_queue.tc_txq[j][i].base =
12115                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12116                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12117                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12118                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12119                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12120                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12121                 }
12122                 return 0;
12123         }
12124
12125         /* get queue mapping if vmdq is enabled */
12126         do {
12127                 vsi = pf->vmdq[j].vsi;
12128                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12129                         if (!(vsi->enabled_tc & (1 << i)))
12130                                 continue;
12131                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
12132                         dcb_info->tc_queue.tc_rxq[j][i].base =
12133                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
12134                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
12135                         dcb_info->tc_queue.tc_txq[j][i].base =
12136                                 dcb_info->tc_queue.tc_rxq[j][i].base;
12137                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
12138                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
12139                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
12140                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
12141                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
12142                 }
12143                 j++;
12144         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
12145         return 0;
12146 }
12147
12148 static int
12149 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
12150 {
12151         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12152         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12153         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12154         uint16_t msix_intr;
12155
12156         msix_intr = intr_handle->intr_vec[queue_id];
12157         if (msix_intr == I40E_MISC_VEC_ID)
12158                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12159                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
12160                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
12161                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12162         else
12163                 I40E_WRITE_REG(hw,
12164                                I40E_PFINT_DYN_CTLN(msix_intr -
12165                                                    I40E_RX_VEC_START),
12166                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
12167                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
12168                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12169
12170         I40E_WRITE_FLUSH(hw);
12171         rte_intr_ack(&pci_dev->intr_handle);
12172
12173         return 0;
12174 }
12175
12176 static int
12177 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
12178 {
12179         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12180         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
12181         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12182         uint16_t msix_intr;
12183
12184         msix_intr = intr_handle->intr_vec[queue_id];
12185         if (msix_intr == I40E_MISC_VEC_ID)
12186                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
12187                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
12188         else
12189                 I40E_WRITE_REG(hw,
12190                                I40E_PFINT_DYN_CTLN(msix_intr -
12191                                                    I40E_RX_VEC_START),
12192                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
12193         I40E_WRITE_FLUSH(hw);
12194
12195         return 0;
12196 }
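
/*
 * Illustrative application-side sketch (helper ours, unused): the two
 * callbacks above back rte_eth_dev_rx_intr_enable()/_disable(). A typical
 * interrupt-mode loop re-arms the queue vector before waiting for an event
 * and disarms it again once traffic resumes and polling takes over.
 */
static __rte_unused void
i40e_example_app_rx_intr_cycle(uint16_t port_id, uint16_t queue_id)
{
        /* arm the queue interrupt before sleeping on the event fd */
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        /* ... waiting on the queue event (e.g. rte_epoll_wait()) goes here ... */
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
}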
12197
12198 /**
12199  * This function checks whether a register offset is valid.
12200  * The following register ranges are valid only for X722:
12201  * 0x2b800--0x2bb00
12202  * 0x38700--0x38a00
12203  * 0x3d800--0x3db00
12204  * 0x208e00--0x209000
12205  * 0x20be00--0x20c000
12206  * 0x263c00--0x264000
12207  * 0x265c00--0x266000
12208  */
12209 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
12210 {
12211         if ((type != I40E_MAC_X722) &&
12212             ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
12213              (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
12214              (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
12215              (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
12216              (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
12217              (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
12218              (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
12219                 return 0;
12220         else
12221                 return 1;
12222 }
12223
12224 static int i40e_get_regs(struct rte_eth_dev *dev,
12225                          struct rte_dev_reg_info *regs)
12226 {
12227         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12228         uint32_t *ptr_data = regs->data;
12229         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
12230         const struct i40e_reg_info *reg_info;
12231
12232         if (ptr_data == NULL) {
12233                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
12234                 regs->width = sizeof(uint32_t);
12235                 return 0;
12236         }
12237
12238         /* The first few registers have to be read using AQ operations */
12239         reg_idx = 0;
12240         while (i40e_regs_adminq[reg_idx].name) {
12241                 reg_info = &i40e_regs_adminq[reg_idx++];
12242                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12243                         for (arr_idx2 = 0;
12244                                         arr_idx2 <= reg_info->count2;
12245                                         arr_idx2++) {
12246                                 reg_offset = arr_idx * reg_info->stride1 +
12247                                         arr_idx2 * reg_info->stride2;
12248                                 reg_offset += reg_info->base_addr;
12249                                 ptr_data[reg_offset >> 2] =
12250                                         i40e_read_rx_ctl(hw, reg_offset);
12251                         }
12252         }
12253
12254         /* The remaining registers can be read using primitives */
12255         reg_idx = 0;
12256         while (i40e_regs_others[reg_idx].name) {
12257                 reg_info = &i40e_regs_others[reg_idx++];
12258                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
12259                         for (arr_idx2 = 0;
12260                                         arr_idx2 <= reg_info->count2;
12261                                         arr_idx2++) {
12262                                 reg_offset = arr_idx * reg_info->stride1 +
12263                                         arr_idx2 * reg_info->stride2;
12264                                 reg_offset += reg_info->base_addr;
12265                                 if (!i40e_valid_regs(hw->mac.type, reg_offset))
12266                                         ptr_data[reg_offset >> 2] = 0;
12267                                 else
12268                                         ptr_data[reg_offset >> 2] =
12269                                                 I40E_READ_REG(hw, reg_offset);
12270                         }
12271         }
12272
12273         return 0;
12274 }
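
/*
 * Illustrative application-side sketch (helper ours, unused) of the
 * two-call contract honoured above: the first call with data == NULL only
 * reports length and width, the second fills the caller's buffer.
 */
static __rte_unused int
i40e_example_app_dump_regs(uint16_t port_id)
{
        struct rte_dev_reg_info info;
        int ret;

        memset(&info, 0, sizeof(info));
        ret = rte_eth_dev_get_reg_info(port_id, &info);
        if (ret)
                return ret;

        info.data = rte_zmalloc("regs_dump", info.length * info.width, 0);
        if (info.data == NULL)
                return -ENOMEM;
        ret = rte_eth_dev_get_reg_info(port_id, &info);
        rte_free(info.data);
        return ret;
}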
12275
12276 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
12277 {
12278         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12279
12280         /* Convert word count to byte count */
12281         return hw->nvm.sr_size << 1;
12282 }
12283
12284 static int i40e_get_eeprom(struct rte_eth_dev *dev,
12285                            struct rte_dev_eeprom_info *eeprom)
12286 {
12287         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12288         uint16_t *data = eeprom->data;
12289         uint16_t offset, length, cnt_words;
12290         int ret_code;
12291
12292         offset = eeprom->offset >> 1;
12293         length = eeprom->length >> 1;
12294         cnt_words = length;
12295
12296         if (offset > hw->nvm.sr_size ||
12297                 offset + length > hw->nvm.sr_size) {
12298                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
12299                 return -EINVAL;
12300         }
12301
12302         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
12303
12304         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
12305         if (ret_code != I40E_SUCCESS || cnt_words != length) {
12306                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
12307                 return -EIO;
12308         }
12309
12310         return 0;
12311 }
12312
12313 static int i40e_get_module_info(struct rte_eth_dev *dev,
12314                                 struct rte_eth_dev_module_info *modinfo)
12315 {
12316         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12317         uint32_t sff8472_comp = 0;
12318         uint32_t sff8472_swap = 0;
12319         uint32_t sff8636_rev = 0;
12320         i40e_status status;
12321         uint32_t type = 0;
12322
12323         /* Check if firmware supports reading module EEPROM. */
12324         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
12325                 PMD_DRV_LOG(ERR,
12326                             "Module EEPROM memory read not supported. "
12327                             "Please update the NVM image.");
12328                 return -EINVAL;
12329         }
12330
12331         status = i40e_update_link_info(hw);
12332         if (status)
12333                 return -EIO;
12334
12335         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
12336                 PMD_DRV_LOG(ERR,
12337                             "Cannot read module EEPROM memory. "
12338                             "No module connected.");
12339                 return -EINVAL;
12340         }
12341
12342         type = hw->phy.link_info.module_type[0];
12343
12344         switch (type) {
12345         case I40E_MODULE_TYPE_SFP:
12346                 status = i40e_aq_get_phy_register(hw,
12347                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12348                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12349                                 I40E_MODULE_SFF_8472_COMP,
12350                                 &sff8472_comp, NULL);
12351                 if (status)
12352                         return -EIO;
12353
12354                 status = i40e_aq_get_phy_register(hw,
12355                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12356                                 I40E_I2C_EEPROM_DEV_ADDR, 1,
12357                                 I40E_MODULE_SFF_8472_SWAP,
12358                                 &sff8472_swap, NULL);
12359                 if (status)
12360                         return -EIO;
12361
12362                 /* Check if the module requires address swap to access
12363                  * the other EEPROM memory page.
12364                  */
12365                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
12366                         PMD_DRV_LOG(WARNING,
12367                                     "Module address swap to access "
12368                                     "page 0xA2 is not supported.");
12369                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12370                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12371                 } else if (sff8472_comp == 0x00) {
12372                         /* Module is not SFF-8472 compliant */
12373                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
12374                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
12375                 } else {
12376                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
12377                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
12378                 }
12379                 break;
12380         case I40E_MODULE_TYPE_QSFP_PLUS:
12381                 /* Read from memory page 0. */
12382                 status = i40e_aq_get_phy_register(hw,
12383                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12384                                 0, 1,
12385                                 I40E_MODULE_REVISION_ADDR,
12386                                 &sff8636_rev, NULL);
12387                 if (status)
12388                         return -EIO;
12389                 /* Determine revision compliance byte */
12390                 if (sff8636_rev > 0x02) {
12391                         /* Module is SFF-8636 compliant */
12392                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
12393                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12394                 } else {
12395                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
12396                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12397                 }
12398                 break;
12399         case I40E_MODULE_TYPE_QSFP28:
12400                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
12401                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12402                 break;
12403         default:
12404                 PMD_DRV_LOG(ERR, "Module type unrecognized");
12405                 return -EINVAL;
12406         }
12407         return 0;
12408 }
12409
12410 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
12411                                   struct rte_dev_eeprom_info *info)
12412 {
12413         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12414         bool is_sfp = false;
12415         i40e_status status;
12416         uint8_t *data;
12417         uint32_t value = 0;
12418         uint32_t i;
12419
12420         if (!info || !info->length || !info->data)
12421                 return -EINVAL;
12422
12423         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
12424                 is_sfp = true;
12425
12426         data = info->data;
12427         for (i = 0; i < info->length; i++) {
12428                 u32 offset = i + info->offset;
12429                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
12430
12431                 /* Check if we need to access the other memory page */
12432                 if (is_sfp) {
12433                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
12434                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
12435                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
12436                         }
12437                 } else {
12438                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
12439                                 /* Compute memory page number and offset. */
12440                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
12441                                 addr++;
12442                         }
12443                 }
12444                 status = i40e_aq_get_phy_register(hw,
12445                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12446                                 addr, 1, offset, &value, NULL);
12447                 if (status)
12448                         return -EIO;
12449                 data[i] = (uint8_t)value;
12450         }
12451         return 0;
12452 }
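
/*
 * Illustrative application-side sketch (helper ours, unused): the generic
 * flow first queries module type and EEPROM length via
 * rte_eth_dev_get_module_info(), then reads the contents through
 * rte_eth_dev_get_module_eeprom(), which lands in the callback above.
 */
static __rte_unused int
i40e_example_app_read_module(uint16_t port_id, uint8_t *buf, uint32_t len)
{
        struct rte_eth_dev_module_info modinfo;
        struct rte_dev_eeprom_info info;
        int ret;

        ret = rte_eth_dev_get_module_info(port_id, &modinfo);
        if (ret)
                return ret;

        memset(&info, 0, sizeof(info));
        info.offset = 0;
        info.length = RTE_MIN(modinfo.eeprom_len, len);
        info.data = buf;
        return rte_eth_dev_get_module_eeprom(port_id, &info);
}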
12453
12454 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
12455                                      struct rte_ether_addr *mac_addr)
12456 {
12457         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12458         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12459         struct i40e_vsi *vsi = pf->main_vsi;
12460         struct i40e_mac_filter_info mac_filter;
12461         struct i40e_mac_filter *f;
12462         int ret;
12463
12464         if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
12465                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
12466                 return -EINVAL;
12467         }
12468
12469         TAILQ_FOREACH(f, &vsi->mac_list, next) {
12470                 if (rte_is_same_ether_addr(&pf->dev_addr,
12471                                                 &f->mac_info.mac_addr))
12472                         break;
12473         }
12474
12475         if (f == NULL) {
12476                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
12477                 return -EIO;
12478         }
12479
12480         mac_filter = f->mac_info;
12481         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
12482         if (ret != I40E_SUCCESS) {
12483                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
12484                 return -EIO;
12485         }
12486         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
12487         ret = i40e_vsi_add_mac(vsi, &mac_filter);
12488         if (ret != I40E_SUCCESS) {
12489                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
12490                 return -EIO;
12491         }
12492         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
12493
12494         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
12495                                         mac_addr->addr_bytes, NULL);
12496         if (ret != I40E_SUCCESS) {
12497                 PMD_DRV_LOG(ERR, "Failed to change mac");
12498                 return -EIO;
12499         }
12500
12501         return 0;
12502 }
12503
12504 static int
12505 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
12506 {
12507         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12508         struct rte_eth_dev_data *dev_data = pf->dev_data;
12509         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
12510         int ret = 0;
12511
12512         /* check if mtu is within the allowed range */
12513         if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
12514                 return -EINVAL;
12515
12516         /* MTU setting is forbidden if the port is started */
12517         if (dev_data->dev_started) {
12518                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
12519                             dev_data->port_id);
12520                 return -EBUSY;
12521         }
12522
12523         if (frame_size > RTE_ETHER_MAX_LEN)
12524                 dev_data->dev_conf.rxmode.offloads |=
12525                         DEV_RX_OFFLOAD_JUMBO_FRAME;
12526         else
12527                 dev_data->dev_conf.rxmode.offloads &=
12528                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
12529
12530         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
12531
12532         return ret;
12533 }
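
/*
 * Illustrative sketch (ours, unused): the largest MTU this driver accepts
 * follows directly from the frame-size limit and overhead used above.
 */
static __rte_unused uint16_t
i40e_example_max_mtu(void)
{
        return I40E_FRAME_SIZE_MAX - I40E_ETH_OVERHEAD;
}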
12534
12535 /* Restore ethertype filter */
12536 static void
12537 i40e_ethertype_filter_restore(struct i40e_pf *pf)
12538 {
12539         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12540         struct i40e_ethertype_filter_list
12541                 *ethertype_list = &pf->ethertype.ethertype_list;
12542         struct i40e_ethertype_filter *f;
12543         struct i40e_control_filter_stats stats = {0};
12544         uint16_t flags;
12545
12546         TAILQ_FOREACH(f, ethertype_list, rules) {
12547                 flags = 0;
12548                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
12549                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
12550                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
12551                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
12552                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
12553
12554                 memset(&stats, 0, sizeof(stats));
12555                 i40e_aq_add_rem_control_packet_filter(hw,
12556                                             f->input.mac_addr.addr_bytes,
12557                                             f->input.ether_type,
12558                                             flags, pf->main_vsi->seid,
12559                                             f->queue, 1, &stats, NULL);
12560         }
12561         PMD_DRV_LOG(INFO, "Ethertype filter:"
12562                     " mac_etype_used = %u, etype_used = %u,"
12563                     " mac_etype_free = %u, etype_free = %u",
12564                     stats.mac_etype_used, stats.etype_used,
12565                     stats.mac_etype_free, stats.etype_free);
12566 }
12567
12568 /* Restore tunnel filter */
12569 static void
12570 i40e_tunnel_filter_restore(struct i40e_pf *pf)
12571 {
12572         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12573         struct i40e_vsi *vsi;
12574         struct i40e_pf_vf *vf;
12575         struct i40e_tunnel_filter_list
12576                 *tunnel_list = &pf->tunnel.tunnel_list;
12577         struct i40e_tunnel_filter *f;
12578         struct i40e_aqc_cloud_filters_element_bb cld_filter;
12579         bool big_buffer;
12580
12581         TAILQ_FOREACH(f, tunnel_list, rules) {
12582                 if (!f->is_to_vf)
12583                         vsi = pf->main_vsi;
12584                 else {
12585                         vf = &pf->vfs[f->vf_id];
12586                         vsi = vf->vsi;
12587                 }
12588                 memset(&cld_filter, 0, sizeof(cld_filter));
12589                 rte_ether_addr_copy((struct rte_ether_addr *)
12590                                 &f->input.outer_mac,
12591                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
12592                 rte_ether_addr_copy((struct rte_ether_addr *)
12593                                 &f->input.inner_mac,
12594                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
12595                 cld_filter.element.inner_vlan = f->input.inner_vlan;
12596                 cld_filter.element.flags = f->input.flags;
12597                 cld_filter.element.tenant_id = f->input.tenant_id;
12598                 cld_filter.element.queue_number = f->queue;
12599                 rte_memcpy(cld_filter.general_fields,
12600                            f->input.general_fields,
12601                            sizeof(f->input.general_fields));
12602
12603                 /* Filters touching the replaced general fields need the
12604                  * big-buffer cloud filter command; recompute per filter.
12605                  */
12606                 big_buffer =
12607                         ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11)
12608                          == I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
12609                         ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12)
12610                          == I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
12611                         ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10)
12612                          == I40E_AQC_ADD_CLOUD_FILTER_0X10);
12613
12614                 if (big_buffer)
12615                         i40e_aq_add_cloud_filters_bb(hw,
12616                                         vsi->seid, &cld_filter, 1);
12617                 else
12618                         i40e_aq_add_cloud_filters(hw, vsi->seid,
12619                                                   &cld_filter.element, 1);
12620         }
12621 }
12622
12623 /* Restore RSS filter */
12624 static inline void
12625 i40e_rss_filter_restore(struct i40e_pf *pf)
12626 {
12627         struct i40e_rss_conf_list *list = &pf->rss_config_list;
12628         struct i40e_rss_filter *filter;
12629
12630         TAILQ_FOREACH(filter, list, next) {
12631                 i40e_config_rss_filter(pf, &filter->rss_filter_info, TRUE);
12632         }
12633 }
12634
12635 static void
12636 i40e_filter_restore(struct i40e_pf *pf)
12637 {
12638         i40e_ethertype_filter_restore(pf);
12639         i40e_tunnel_filter_restore(pf);
12640         i40e_fdir_filter_restore(pf);
12641         i40e_rss_filter_restore(pf);
12642 }
12643
12644 bool
12645 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
12646 {
12647         if (strcmp(dev->device->driver->name, drv->driver.name))
12648                 return false;
12649
12650         return true;
12651 }
12652
12653 bool
12654 is_i40e_supported(struct rte_eth_dev *dev)
12655 {
12656         return is_device_supported(dev, &rte_i40e_pmd);
12657 }
12658
12659 struct i40e_customized_pctype *
12660 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
12661 {
12662         int i;
12663
12664         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
12665                 if (pf->customized_pctype[i].index == index)
12666                         return &pf->customized_pctype[i];
12667         }
12668         return NULL;
12669 }
12670
12671 static int
12672 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
12673                               uint32_t pkg_size, uint32_t proto_num,
12674                               struct rte_pmd_i40e_proto_info *proto,
12675                               enum rte_pmd_i40e_package_op op)
12676 {
12677         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12678         uint32_t pctype_num;
12679         struct rte_pmd_i40e_ptype_info *pctype;
12680         uint32_t buff_size;
12681         struct i40e_customized_pctype *new_pctype = NULL;
12682         uint8_t proto_id;
12683         uint8_t pctype_value;
12684         char name[64];
12685         uint32_t i, j, n;
12686         int ret;
12687
12688         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12689             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12690                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12691                 return -1;
12692         }
12693
12694         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12695                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
12696                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
12697         if (ret) {
12698                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
12699                 return -1;
12700         }
12701         if (!pctype_num) {
12702                 PMD_DRV_LOG(INFO, "No new pctype added");
12703                 return -1;
12704         }
12705
12706         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
12707         pctype = rte_zmalloc("new_pctype", buff_size, 0);
12708         if (!pctype) {
12709                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12710                 return -1;
12711         }
12712         /* get information about new pctype list */
12713         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12714                                         (uint8_t *)pctype, buff_size,
12715                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
12716         if (ret) {
12717                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
12718                 rte_free(pctype);
12719                 return -1;
12720         }
12721
12722         /* Update customized pctype. */
12723         for (i = 0; i < pctype_num; i++) {
12724                 pctype_value = pctype[i].ptype_id;
12725                 memset(name, 0, sizeof(name));
12726                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12727                         proto_id = pctype[i].protocols[j];
12728                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12729                                 continue;
12730                         for (n = 0; n < proto_num; n++) {
12731                                 if (proto[n].proto_id != proto_id)
12732                                         continue;
12733                                 strlcat(name, proto[n].name, sizeof(name));
12734                                 strlcat(name, "_", sizeof(name));
12735                                 break;
12736                         }
12737                 }
12738                 if (strlen(name))
12739                         name[strlen(name) - 1] = '\0';
12740                 PMD_DRV_LOG(INFO, "name = %s", name);
12740                 if (!strcmp(name, "GTPC"))
12741                         new_pctype =
12742                                 i40e_find_customized_pctype(pf,
12743                                                       I40E_CUSTOMIZED_GTPC);
12744                 else if (!strcmp(name, "GTPU_IPV4"))
12745                         new_pctype =
12746                                 i40e_find_customized_pctype(pf,
12747                                                    I40E_CUSTOMIZED_GTPU_IPV4);
12748                 else if (!strcmp(name, "GTPU_IPV6"))
12749                         new_pctype =
12750                                 i40e_find_customized_pctype(pf,
12751                                                    I40E_CUSTOMIZED_GTPU_IPV6);
12752                 else if (!strcmp(name, "GTPU"))
12753                         new_pctype =
12754                                 i40e_find_customized_pctype(pf,
12755                                                       I40E_CUSTOMIZED_GTPU);
12756                 else if (!strcmp(name, "IPV4_L2TPV3"))
12757                         new_pctype =
12758                                 i40e_find_customized_pctype(pf,
12759                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
12760                 else if (!strcmp(name, "IPV6_L2TPV3"))
12761                         new_pctype =
12762                                 i40e_find_customized_pctype(pf,
12763                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
12764                 else if (!strcmp(name, "IPV4_ESP"))
12765                         new_pctype =
12766                                 i40e_find_customized_pctype(pf,
12767                                                 I40E_CUSTOMIZED_ESP_IPV4);
12768                 else if (!strcmp(name, "IPV6_ESP"))
12769                         new_pctype =
12770                                 i40e_find_customized_pctype(pf,
12771                                                 I40E_CUSTOMIZED_ESP_IPV6);
12772                 else if (!strcmp(name, "IPV4_UDP_ESP"))
12773                         new_pctype =
12774                                 i40e_find_customized_pctype(pf,
12775                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
12776                 else if (!strcmp(name, "IPV6_UDP_ESP"))
12777                         new_pctype =
12778                                 i40e_find_customized_pctype(pf,
12779                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
12780                 else if (!strcmp(name, "IPV4_AH"))
12781                         new_pctype =
12782                                 i40e_find_customized_pctype(pf,
12783                                                 I40E_CUSTOMIZED_AH_IPV4);
12784                 else if (!strcmp(name, "IPV6_AH"))
12785                         new_pctype =
12786                                 i40e_find_customized_pctype(pf,
12787                                                 I40E_CUSTOMIZED_AH_IPV6);
12788                 if (new_pctype) {
12789                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12790                                 new_pctype->pctype = pctype_value;
12791                                 new_pctype->valid = true;
12792                         } else {
12793                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12794                                 new_pctype->valid = false;
12795                         }
12796                 }
12797         }
12798
12799         rte_free(pctype);
12800         return 0;
12801 }
12802
12803 static int
12804 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12805                              uint32_t pkg_size, uint32_t proto_num,
12806                              struct rte_pmd_i40e_proto_info *proto,
12807                              enum rte_pmd_i40e_package_op op)
12808 {
12809         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12810         uint16_t port_id = dev->data->port_id;
12811         uint32_t ptype_num;
12812         struct rte_pmd_i40e_ptype_info *ptype;
12813         uint32_t buff_size;
12814         uint8_t proto_id;
12815         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12816         uint32_t i, j, n;
12817         bool in_tunnel;
12818         int ret;
12819
12820         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12821             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12822                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12823                 return -1;
12824         }
12825
12826         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12827                 rte_pmd_i40e_ptype_mapping_reset(port_id);
12828                 return 0;
12829         }
12830
12831         /* get information about new ptype num */
12832         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12833                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
12834                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12835         if (ret) {
12836                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12837                 return ret;
12838         }
12839         if (!ptype_num) {
12840                 PMD_DRV_LOG(INFO, "No new ptype added");
12841                 return -1;
12842         }
12843
12844         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12845         ptype = rte_zmalloc("new_ptype", buff_size, 0);
12846         if (!ptype) {
12847                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12848                 return -1;
12849         }
12850
12851         /* get information about new ptype list */
12852         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12853                                         (uint8_t *)ptype, buff_size,
12854                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12855         if (ret) {
12856                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12857                 rte_free(ptype);
12858                 return ret;
12859         }
12860
12861         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12862         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12863         if (!ptype_mapping) {
12864                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12865                 rte_free(ptype);
12866                 return -1;
12867         }
12868
12869         /* Update ptype mapping table. */
12870         for (i = 0; i < ptype_num; i++) {
12871                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12872                 ptype_mapping[i].sw_ptype = 0;
12873                 in_tunnel = false;
12874                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12875                         proto_id = ptype[i].protocols[j];
12876                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12877                                 continue;
12878                         for (n = 0; n < proto_num; n++) {
12879                                 if (proto[n].proto_id != proto_id)
12880                                         continue;
12881                                 strlcpy(name, proto[n].name,
12882                                         sizeof(name));
12883                                 PMD_DRV_LOG(INFO, "name = %s", name);
12884                                 if (!strncasecmp(name, "PPPOE", 5))
12885                                         ptype_mapping[i].sw_ptype |=
12886                                                 RTE_PTYPE_L2_ETHER_PPPOE;
12887                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12888                                          !in_tunnel) {
12889                                         ptype_mapping[i].sw_ptype |=
12890                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12891                                         ptype_mapping[i].sw_ptype |=
12892                                                 RTE_PTYPE_L4_FRAG;
12893                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12894                                            in_tunnel) {
12895                                         ptype_mapping[i].sw_ptype |=
12896                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12897                                         ptype_mapping[i].sw_ptype |=
12898                                                 RTE_PTYPE_INNER_L4_FRAG;
12899                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
12900                                         ptype_mapping[i].sw_ptype |=
12901                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12902                                         in_tunnel = true;
12903                                 } else if (!strncasecmp(name, "IPV4", 4) &&
12904                                            !in_tunnel)
12905                                         ptype_mapping[i].sw_ptype |=
12906                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12907                                 else if (!strncasecmp(name, "IPV4", 4) &&
12908                                          in_tunnel)
12909                                         ptype_mapping[i].sw_ptype |=
12910                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12911                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12912                                          !in_tunnel) {
12913                                         ptype_mapping[i].sw_ptype |=
12914                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12915                                         ptype_mapping[i].sw_ptype |=
12916                                                 RTE_PTYPE_L4_FRAG;
12917                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12918                                            in_tunnel) {
12919                                         ptype_mapping[i].sw_ptype |=
12920                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12921                                         ptype_mapping[i].sw_ptype |=
12922                                                 RTE_PTYPE_INNER_L4_FRAG;
12923                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
12924                                         ptype_mapping[i].sw_ptype |=
12925                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12926                                         in_tunnel = true;
12927                                 } else if (!strncasecmp(name, "IPV6", 4) &&
12928                                            !in_tunnel)
12929                                         ptype_mapping[i].sw_ptype |=
12930                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12931                                 else if (!strncasecmp(name, "IPV6", 4) &&
12932                                          in_tunnel)
12933                                         ptype_mapping[i].sw_ptype |=
12934                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12935                                 else if (!strncasecmp(name, "UDP", 3) &&
12936                                          !in_tunnel)
12937                                         ptype_mapping[i].sw_ptype |=
12938                                                 RTE_PTYPE_L4_UDP;
12939                                 else if (!strncasecmp(name, "UDP", 3) &&
12940                                          in_tunnel)
12941                                         ptype_mapping[i].sw_ptype |=
12942                                                 RTE_PTYPE_INNER_L4_UDP;
12943                                 else if (!strncasecmp(name, "TCP", 3) &&
12944                                          !in_tunnel)
12945                                         ptype_mapping[i].sw_ptype |=
12946                                                 RTE_PTYPE_L4_TCP;
12947                                 else if (!strncasecmp(name, "TCP", 3) &&
12948                                          in_tunnel)
12949                                         ptype_mapping[i].sw_ptype |=
12950                                                 RTE_PTYPE_INNER_L4_TCP;
12951                                 else if (!strncasecmp(name, "SCTP", 4) &&
12952                                          !in_tunnel)
12953                                         ptype_mapping[i].sw_ptype |=
12954                                                 RTE_PTYPE_L4_SCTP;
12955                                 else if (!strncasecmp(name, "SCTP", 4) &&
12956                                          in_tunnel)
12957                                         ptype_mapping[i].sw_ptype |=
12958                                                 RTE_PTYPE_INNER_L4_SCTP;
12959                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12960                                           !strncasecmp(name, "ICMPV6", 6)) &&
12961                                          !in_tunnel)
12962                                         ptype_mapping[i].sw_ptype |=
12963                                                 RTE_PTYPE_L4_ICMP;
12964                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12965                                           !strncasecmp(name, "ICMPV6", 6)) &&
12966                                          in_tunnel)
12967                                         ptype_mapping[i].sw_ptype |=
12968                                                 RTE_PTYPE_INNER_L4_ICMP;
12969                                 else if (!strncasecmp(name, "GTPC", 4)) {
12970                                         ptype_mapping[i].sw_ptype |=
12971                                                 RTE_PTYPE_TUNNEL_GTPC;
12972                                         in_tunnel = true;
12973                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12974                                         ptype_mapping[i].sw_ptype |=
12975                                                 RTE_PTYPE_TUNNEL_GTPU;
12976                                         in_tunnel = true;
12977                                 } else if (!strncasecmp(name, "ESP", 3)) {
12978                                         ptype_mapping[i].sw_ptype |=
12979                                                 RTE_PTYPE_TUNNEL_ESP;
12980                                         in_tunnel = true;
12981                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12982                                         ptype_mapping[i].sw_ptype |=
12983                                                 RTE_PTYPE_TUNNEL_GRENAT;
12984                                         in_tunnel = true;
12985                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12986                                            !strncasecmp(name, "L2TPV2", 6) ||
12987                                            !strncasecmp(name, "L2TPV3", 6)) {
12988                                         ptype_mapping[i].sw_ptype |=
12989                                                 RTE_PTYPE_TUNNEL_L2TP;
12990                                         in_tunnel = true;
12991                                 }
12992
12993                                 break;
12994                         }
12995                 }
12996         }
12997
12998         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12999                                                 ptype_num, 0);
13000         if (ret)
13001                 PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
13002
13003         rte_free(ptype_mapping);
13004         rte_free(ptype);
13005         return ret;
13006 }
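/*
 * Worked example (illustrative only): a DDP ptype whose protocol chain is
 * OIPV4 -> GTPU -> IPV4 -> UDP is folded by the mapping loop above into a
 * single software ptype:
 *
 *   OIPV4 -> RTE_PTYPE_L3_IPV4_EXT_UNKNOWN        (sets in_tunnel = true)
 *   GTPU  -> RTE_PTYPE_TUNNEL_GTPU                (in_tunnel stays true)
 *   IPV4  -> RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN  (tunnel branch)
 *   UDP   -> RTE_PTYPE_INNER_L4_UDP               (tunnel branch)
 *
 * The "O" (outer) protocols open a tunnel, after which every L3/L4
 * protocol maps to its RTE_PTYPE_INNER_* counterpart.
 */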
13007
13008 void
13009 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
13010                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
13011 {
13012         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
13013         uint32_t proto_num;
13014         struct rte_pmd_i40e_proto_info *proto;
13015         uint32_t buff_size;
13016         uint32_t i;
13017         int ret;
13018
13019         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
13020             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
13021                 PMD_DRV_LOG(ERR, "Unsupported operation.");
13022                 return;
13023         }
13024
13025         /* get information about protocol number */
13026         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
13027                                        (uint8_t *)&proto_num, sizeof(proto_num),
13028                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
13029         if (ret) {
13030                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
13031                 return;
13032         }
13033         if (!proto_num) {
13034                 PMD_DRV_LOG(INFO, "No new protocol added");
13035                 return;
13036         }
13037
13038         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
13039         proto = rte_zmalloc("new_proto", buff_size, 0);
13040         if (!proto) {
13041                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
13042                 return;
13043         }
13044
13045         /* get information about protocol list */
13046         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
13047                                         (uint8_t *)proto, buff_size,
13048                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
13049         if (ret) {
13050                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
13051                 rte_free(proto);
13052                 return;
13053         }
13054
13055         /* Check if GTP is supported. */
13056         for (i = 0; i < proto_num; i++) {
13057                 if (!strncmp(proto[i].name, "GTP", 3)) {
13058                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
13059                                 pf->gtp_support = true;
13060                         else
13061                                 pf->gtp_support = false;
13062                         break;
13063                 }
13064         }
13065
13066         /* Check if ESP is supported. */
13067         for (i = 0; i < proto_num; i++) {
13068                 if (!strncmp(proto[i].name, "ESP", 3)) {
13069                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
13070                                 pf->esp_support = true;
13071                         else
13072                                 pf->esp_support = false;
13073                         break;
13074                 }
13075         }
13076
13077         /* Update customized pctype info */
13078         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
13079                                             proto_num, proto, op);
13080         if (ret)
13081                 PMD_DRV_LOG(INFO, "No pctype is updated.");
13082
13083         /* Update customized ptype info */
13084         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
13085                                            proto_num, proto, op);
13086         if (ret)
13087                 PMD_DRV_LOG(INFO, "No ptype is updated.");
13088
13089         rte_free(proto);
13090 }
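/*
 * Usage sketch (illustrative): the updates above run while a DDP package
 * is written to or deleted from the device. An application typically
 * reaches this path through the rte_pmd_i40e API, e.g.:
 *
 *   ret = rte_pmd_i40e_process_ddp_package(port_id, pkg_buf, pkg_size,
 *                                          RTE_PMD_I40E_PKG_OP_WR_ADD);
 *
 * where pkg_buf/pkg_size hold the package image read from the profile
 * file supplied by the user.
 */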
13091
13092 /* Create a QinQ cloud filter
13093  *
13094  * The Fortville NIC has limited resources for tunnel filters,
13095  * so we can only reuse existing filters.
13096  *
13097  * In step 1 we define which Field Vector fields can be used for
13098  * filter types.
13099  * As we do not have the inner tag defined as a field,
13100  * we have to define it first, by reusing one of the L1 entries.
13101  *
13102  * In step 2 we replace one of the existing filter types with
13103  * a new one for QinQ.
13104  * As we are reusing L1 and replacing L2, some of the default filter
13105  * types will disappear, depending on which L1 and L2 entries we reuse.
13106  *
13107  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
13108  *
13109  * 1.   Create L1 filter of outer vlan (12b) which will be in use
13110  *              later when we define the cloud filter.
13111  *      a.      Valid_flags.replace_cloud = 0
13112  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
13113  *      c.      New_filter = 0x10
13114  *      d.      TR bit = 0xff (optional, not used here)
13115  *      e.      Buffer - 2 entries:
13116  *              i.      Byte 0 = 8 (outer vlan FV index).
13117  *                      Byte 1 = 0 (rsv)
13118  *                      Byte 2-3 = 0x0fff
13119  *              ii.     Byte 0 = 37 (inner vlan FV index).
13120  *                      Byte 1 = 0 (rsv)
13121  *                      Byte 2-3 = 0x0fff
13122  *
13123  * Step 2:
13124  * 2.   Create cloud filter using two L1 filters entries: stag and
13125  *              new filter(outer vlan+ inner vlan)
13126  *      a.      Valid_flags.replace_cloud = 1
13127  *      b.      Old_filter = 1 (instead of outer IP)
13128  *      c.      New_filter = 0x10
13129  *      d.      Buffer - 2 entries:
13130  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
13131  *                      Byte 1-3 = 0 (rsv)
13132  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
13133  *                      Byte 9-11 = 0 (rsv)
13134  */
13135 static int
13136 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
13137 {
13138         int ret = -ENOTSUP;
13139         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
13140         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
13141         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13142         struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
13143
13144         if (pf->support_multi_driver) {
13145                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
13146                 return ret;
13147         }
13148
13149         /* Init */
13150         memset(&filter_replace, 0,
13151                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13152         memset(&filter_replace_buf, 0,
13153                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13154
13155         /* create L1 filter */
13156         filter_replace.old_filter_type =
13157                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
13158         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13159         filter_replace.tr_bit = 0;
13160
13161         /* Prepare the buffer, 2 entries */
13162         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
13163         filter_replace_buf.data[0] |=
13164                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13165         /* Field Vector 12b mask */
13166         filter_replace_buf.data[2] = 0xff;
13167         filter_replace_buf.data[3] = 0x0f;
13168         filter_replace_buf.data[4] =
13169                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
13170         filter_replace_buf.data[4] |=
13171                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13172         /* Field Vector 12b mask */
13173         filter_replace_buf.data[6] = 0xff;
13174         filter_replace_buf.data[7] = 0x0f;
13175         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13176                         &filter_replace_buf);
13177         if (ret != I40E_SUCCESS)
13178                 return ret;
13179
13180         if (filter_replace.old_filter_type !=
13181             filter_replace.new_filter_type)
13182                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
13183                             " original: 0x%x, new: 0x%x",
13184                             dev->device->name,
13185                             filter_replace.old_filter_type,
13186                             filter_replace.new_filter_type);
13187
13188         /* Apply the second L2 cloud filter */
13189         memset(&filter_replace, 0,
13190                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
13191         memset(&filter_replace_buf, 0,
13192                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
13193
13194         /* Create the L2 filter; its input is the L1 filter created above */
13195         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
13196         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
13197         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13198
13199         /* Prepare the buffer, 2 entries */
13200         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
13201         filter_replace_buf.data[0] |=
13202                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13203         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
13204         filter_replace_buf.data[4] |=
13205                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
13206         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
13207                         &filter_replace_buf);
13208         if (!ret && (filter_replace.old_filter_type !=
13209                      filter_replace.new_filter_type))
13210                 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
13211                             " original: 0x%x, new: 0x%x",
13212                             dev->device->name,
13213                             filter_replace.old_filter_type,
13214                             filter_replace.new_filter_type);
13215
13216         return ret;
13217 }
13218
13219 int
13220 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
13221                    const struct rte_flow_action_rss *in)
13222 {
13223         if (in->key_len > RTE_DIM(out->key) ||
13224             in->queue_num > RTE_DIM(out->queue))
13225                 return -EINVAL;
13226         if (!in->key && in->key_len)
13227                 return -EINVAL;
13228         out->conf = (struct rte_flow_action_rss){
13229                 .func = in->func,
13230                 .level = in->level,
13231                 .types = in->types,
13232                 .key_len = in->key_len,
13233                 .queue_num = in->queue_num,
13234                 .queue = memcpy(out->queue, in->queue,
13235                                 sizeof(*in->queue) * in->queue_num),
13236         };
13237         if (in->key)
13238                 out->conf.key = memcpy(out->key, in->key, in->key_len);
13239         return 0;
13240 }
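/*
 * Usage sketch (illustrative): i40e_rss_conf_init() deep-copies a
 * caller-supplied RSS action into driver-owned storage, so the rule can
 * outlive the rte_flow_action_rss the application passed in:
 *
 *   static const uint16_t q[] = { 0, 1, 2, 3 };
 *   struct rte_flow_action_rss in = {
 *           .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *           .types = ETH_RSS_NONFRAG_IPV4_TCP,
 *           .queue = q,
 *           .queue_num = RTE_DIM(q),
 *   };
 *   struct i40e_rte_flow_rss_conf out;
 *
 *   memset(&out, 0, sizeof(out));
 *   if (i40e_rss_conf_init(&out, &in) == 0)
 *           ... out.conf.queue now points at out.queue, not at q ...
 */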
13241
13242 /* Write HENA register to enable hash */
13243 static int
13244 i40e_rss_hash_set(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *rss_conf)
13245 {
13246         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13247         uint8_t *key = (void *)(uintptr_t)rss_conf->conf.key;
13248         uint64_t hena;
13249         int ret;
13250
13251         ret = i40e_set_rss_key(pf->main_vsi, key,
13252                                rss_conf->conf.key_len);
13253         if (ret)
13254                 return ret;
13255
13256         hena = i40e_config_hena(pf->adapter, rss_conf->conf.types);
13257         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
13258         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
13259         I40E_WRITE_FLUSH(hw);
13260
13261         return 0;
13262 }
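/*
 * The 64-bit hash-enable (HENA) bitmap is split across two 32-bit
 * registers; reading it back mirrors the split write above:
 *
 *   uint64_t hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
 *   hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
 */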
13263
13264 /* Configure hash input set */
13265 static int
13266 i40e_rss_conf_hash_inset(struct i40e_pf *pf, uint64_t types)
13267 {
13268         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13269         struct rte_eth_input_set_conf conf;
13270         uint64_t mask0;
13271         int ret = 0;
13272         uint32_t j;
13273         int i;
13274         static const struct {
13275                 uint64_t type;
13276                 enum rte_eth_input_set_field field;
13277         } inset_match_table[] = {
13278                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
13279                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13280                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
13281                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13282                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_SRC_ONLY,
13283                         RTE_ETH_INPUT_SET_UNKNOWN},
13284                 {ETH_RSS_FRAG_IPV4 | ETH_RSS_L4_DST_ONLY,
13285                         RTE_ETH_INPUT_SET_UNKNOWN},
13286
13287                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
13288                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13289                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
13290                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13291                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
13292                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13293                 {ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
13294                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13295
13296                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
13297                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13298                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
13299                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13300                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
13301                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13302                 {ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
13303                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13304
13305                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
13306                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13307                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
13308                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13309                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
13310                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13311                 {ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
13312                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13313
13314                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
13315                         RTE_ETH_INPUT_SET_L3_SRC_IP4},
13316                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
13317                         RTE_ETH_INPUT_SET_L3_DST_IP4},
13318                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_SRC_ONLY,
13319                         RTE_ETH_INPUT_SET_UNKNOWN},
13320                 {ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L4_DST_ONLY,
13321                         RTE_ETH_INPUT_SET_UNKNOWN},
13322
13323                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
13324                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13325                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
13326                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13327                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_SRC_ONLY,
13328                         RTE_ETH_INPUT_SET_UNKNOWN},
13329                 {ETH_RSS_FRAG_IPV6 | ETH_RSS_L4_DST_ONLY,
13330                         RTE_ETH_INPUT_SET_UNKNOWN},
13331
13332                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
13333                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13334                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
13335                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13336                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
13337                         RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
13338                 {ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
13339                         RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
13340
13341                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
13342                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13343                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
13344                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13345                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
13346                         RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
13347                 {ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
13348                         RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
13349
13350                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
13351                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13352                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
13353                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13354                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
13355                         RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
13356                 {ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
13357                         RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
13358
13359                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
13360                         RTE_ETH_INPUT_SET_L3_SRC_IP6},
13361                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
13362                         RTE_ETH_INPUT_SET_L3_DST_IP6},
13363                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_SRC_ONLY,
13364                         RTE_ETH_INPUT_SET_UNKNOWN},
13365                 {ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L4_DST_ONLY,
13366                         RTE_ETH_INPUT_SET_UNKNOWN},
13367         };
13368
13369         mask0 = types & pf->adapter->flow_types_mask;
13370         conf.op = RTE_ETH_INPUT_SET_SELECT;
13371         conf.inset_size = 0;
13372         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX; i++) {
13373                 if (mask0 & (1ULL << i)) {
13374                         conf.flow_type = i;
13375                         break;
13376                 }
13377         }
13378
13379         for (j = 0; j < RTE_DIM(inset_match_table); j++) {
13380                 if ((types & inset_match_table[j].type) ==
13381                     inset_match_table[j].type) {
13382                         if (inset_match_table[j].field ==
13383                             RTE_ETH_INPUT_SET_UNKNOWN)
13384                                 return -EINVAL;
13385
13386                         conf.field[conf.inset_size] =
13387                                 inset_match_table[j].field;
13388                         conf.inset_size++;
13389                 }
13390         }
13391
13392         if (conf.inset_size) {
13393                 ret = i40e_hash_filter_inset_select(hw, &conf);
13394                 if (ret)
13395                         return ret;
13396         }
13397
13398         return ret;
13399 }
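/*
 * Example (illustrative): requesting
 * ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY matches the
 * RTE_ETH_INPUT_SET_L3_SRC_IP4 entry in the table above, so only the
 * IPv4 source address feeds the hash. Combinations mapped to
 * RTE_ETH_INPUT_SET_UNKNOWN (e.g. an L4 field for a fragmented flow,
 * which has no parsable L4 header) are rejected with -EINVAL.
 */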
13400
13401 /* Look up conflicting RSS rules and mark them as invalid */
13402 static void
13403 i40e_rss_mark_invalid_rule(struct i40e_pf *pf,
13404                 struct i40e_rte_flow_rss_conf *conf)
13405 {
13406         struct i40e_rss_filter *rss_item;
13407         uint64_t rss_inset;
13408
13409         /* Clear input set bits before comparing the pctype */
13410         rss_inset = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
13411                 ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
13412
13413         /* Invalidate every enabled rule that clashes with the new one */
13414         TAILQ_FOREACH(rss_item, &pf->rss_config_list, next) {
13415                 if (!rss_item->rss_filter_info.valid)
13416                         continue;
13417
13418                 if (conf->conf.queue_num &&
13419                     rss_item->rss_filter_info.conf.queue_num)
13420                         rss_item->rss_filter_info.valid = false;
13421
13422                 if (conf->conf.types &&
13423                     (rss_item->rss_filter_info.conf.types &
13424                     rss_inset) ==
13425                     (conf->conf.types & rss_inset))
13426                         rss_item->rss_filter_info.valid = false;
13427
13428                 if (conf->conf.func ==
13429                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
13430                     rss_item->rss_filter_info.conf.func ==
13431                     RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13432                         rss_item->rss_filter_info.valid = false;
13433         }
13434 }
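/*
 * Example (illustrative): ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY
 * and ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY reduce to the same
 * flow-type bits once the *_ONLY modifiers are masked off, so adding the
 * second rule invalidates the first: both program the same pctype, and
 * only the most recent input-set configuration takes effect in hardware.
 */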
13435
13436 /* Configure RSS hash function */
13437 static int
13438 i40e_rss_config_hash_function(struct i40e_pf *pf,
13439                 struct i40e_rte_flow_rss_conf *conf)
13440 {
13441         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13442         uint32_t reg, i;
13443         uint64_t mask0;
13444         uint16_t j;
13445
13446         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13447                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13448                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
13449                         PMD_DRV_LOG(DEBUG, "Hash function already set to Simple XOR");
13450                         I40E_WRITE_FLUSH(hw);
13451                         i40e_rss_mark_invalid_rule(pf, conf);
13452
13453                         return 0;
13454                 }
13455                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
13456
13457                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13458                 I40E_WRITE_FLUSH(hw);
13459                 i40e_rss_mark_invalid_rule(pf, conf);
13460         } else if (conf->conf.func ==
13461                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13462                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13463
13464                 i40e_set_symmetric_hash_enable_per_port(hw, 1);
13465                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13466                         if (mask0 & (1UL << i))
13467                                 break;
13468                 }
13469
13470                 if (i == UINT64_BIT)
13471                         return -EINVAL;
13472
13473                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13474                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13475                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13476                                 i40e_write_global_rx_ctl(hw,
13477                                         I40E_GLQF_HSYM(j),
13478                                         I40E_GLQF_HSYM_SYMH_ENA_MASK);
13479                 }
13480         }
13481
13482         return 0;
13483 }
13484
13485 /* Enable RSS according to the configuration */
13486 static int
13487 i40e_rss_enable_hash(struct i40e_pf *pf,
13488                 struct i40e_rte_flow_rss_conf *conf)
13489 {
13490         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13491         struct i40e_rte_flow_rss_conf rss_conf;
13492
13493         if (!(conf->conf.types & pf->adapter->flow_types_mask))
13494                 return -ENOTSUP;
13495
13496         memset(&rss_conf, 0, sizeof(rss_conf));
13497         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13498
13499         /* Configure hash input set */
13500         if (i40e_rss_conf_hash_inset(pf, conf->conf.types))
13501                 return -EINVAL;
13502
13503         if (rss_conf.conf.key == NULL || rss_conf.conf.key_len <
13504             (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
13505                 /* Random default keys */
13506                 static uint32_t rss_key_default[] = {0x6b793944,
13507                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
13508                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
13509                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
13510
13511                 rss_conf.conf.key = (uint8_t *)rss_key_default;
13512                 rss_conf.conf.key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
13513                                 sizeof(uint32_t);
13514                 PMD_DRV_LOG(INFO,
13515                         "No valid RSS key config for i40e, using default");
13516         }
13517
13518         rss_conf.conf.types |= rss_info->conf.types;
13519         i40e_rss_hash_set(pf, &rss_conf);
13520
13521         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
13522                 i40e_rss_config_hash_function(pf, conf);
13523
13524         i40e_rss_mark_invalid_rule(pf, conf);
13525
13526         return 0;
13527 }
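/*
 * Note: the fallback key above is (I40E_PFQF_HKEY_MAX_INDEX + 1) 32-bit
 * words; with I40E_PFQF_HKEY_MAX_INDEX == 12 that is 13 * 4 = 52 bytes,
 * exactly the size of the rss_key_default[] array.
 */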
13528
13529 /* Configure RSS queue region */
13530 static int
13531 i40e_rss_config_queue_region(struct i40e_pf *pf,
13532                 struct i40e_rte_flow_rss_conf *conf)
13533 {
13534         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13535         uint32_t lut = 0;
13536         uint16_t j, num;
13537         uint32_t i;
13538
13539         /* If both VMDQ and RSS are enabled, not all of the PF queues are
13540          * used for RSS; calculate the number that actually are.
13541          */
13542         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13543                 num = i40e_pf_calc_configured_queues_num(pf);
13544         else
13545                 num = pf->dev_data->nb_rx_queues;
13546
13547         num = RTE_MIN(num, conf->conf.queue_num);
13548         PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
13549                         num);
13550
13551         if (num == 0) {
13552                 PMD_DRV_LOG(ERR,
13553                         "No PF queues are configured to enable RSS for port %u",
13554                         pf->dev_data->port_id);
13555                 return -ENOTSUP;
13556         }
13557
13558         /* Fill in redirection table */
13559         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13560                 if (j == num)
13561                         j = 0;
13562                 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
13563                         hw->func_caps.rss_table_entry_width) - 1));
13564                 if ((i & 3) == 3)
13565                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13566         }
13567
13568         i40e_rss_mark_invalid_rule(pf, conf);
13569
13570         return 0;
13571 }
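/*
 * Worked example (illustrative): with conf->conf.queue = {0, 1, 2, 3} and
 * num = 4, the loop above packs four 8-bit queue indices into each 32-bit
 * LUT register, so every I40E_PFQF_HLUT register is written with
 * 0x00010203 and the redirection table cycles through queues 0..3.
 */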
13572
13573 /* Configure RSS hash function to default */
13574 static int
13575 i40e_rss_clear_hash_function(struct i40e_pf *pf,
13576                 struct i40e_rte_flow_rss_conf *conf)
13577 {
13578         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13579         uint32_t i, reg;
13580         uint64_t mask0;
13581         uint16_t j;
13582
13583         if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13584                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
13585                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
13586                         PMD_DRV_LOG(DEBUG,
13587                                 "Hash function already set to Toeplitz");
13588                         I40E_WRITE_FLUSH(hw);
13589
13590                         return 0;
13591                 }
13592                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
13593
13594                 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
13595                 I40E_WRITE_FLUSH(hw);
13596         } else if (conf->conf.func ==
13597                    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
13598                 mask0 = conf->conf.types & pf->adapter->flow_types_mask;
13599
13600                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
13601                         if (mask0 & (1UL << i))
13602                                 break;
13603                 }
13604
13605                 if (i == UINT64_BIT)
13606                         return -EINVAL;
13607
13608                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
13609                      j < I40E_FILTER_PCTYPE_MAX; j++) {
13610                         if (pf->adapter->pctypes_tbl[i] & (1ULL << j))
13611                                 i40e_write_global_rx_ctl(hw,
13612                                         I40E_GLQF_HSYM(j),
13613                                         0);
13614                 }
13615         }
13616
13617         return 0;
13618 }
13619
13620 /* Disable RSS hash and configure default input set */
13621 static int
13622 i40e_rss_disable_hash(struct i40e_pf *pf,
13623                 struct i40e_rte_flow_rss_conf *conf)
13624 {
13625         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13626         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13627         struct i40e_rte_flow_rss_conf rss_conf;
13628         uint32_t i;
13629
13630         memset(&rss_conf, 0, sizeof(rss_conf));
13631         rte_memcpy(&rss_conf, conf, sizeof(rss_conf));
13632
13633         /* Disable RSS hash */
13634         rss_conf.conf.types = rss_info->conf.types & ~(conf->conf.types);
13635         i40e_rss_hash_set(pf, &rss_conf);
13636
13637         for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
13638                 if (!(pf->adapter->flow_types_mask & (1ULL << i)) ||
13639                     !(conf->conf.types & (1ULL << i)))
13640                         continue;
13641
13642                 /* Configure default input set */
13643                 struct rte_eth_input_set_conf input_conf = {
13644                         .op = RTE_ETH_INPUT_SET_SELECT,
13645                         .flow_type = i,
13646                         .inset_size = 1,
13647                 };
13648                 input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
13649                 i40e_hash_filter_inset_select(hw, &input_conf);
13650         }
13651
13652         rss_info->conf.types = rss_conf.conf.types;
13653
13654         i40e_rss_clear_hash_function(pf, conf);
13655
13656         return 0;
13657 }
13658
13659 /* Configure RSS queue region to default */
13660 static int
13661 i40e_rss_clear_queue_region(struct i40e_pf *pf)
13662 {
13663         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
13664         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13665         uint16_t queue[I40E_MAX_Q_PER_TC];
13666         uint32_t num_rxq, i;
13667         uint32_t lut = 0;
13668         uint16_t j, num;
13669
13670         num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
13671
13672         for (j = 0; j < num_rxq; j++)
13673                 queue[j] = j;
13674
13675         /* If both VMDQ and RSS are enabled, not all of the PF queues are
13676          * used for RSS; calculate the number that actually are.
13677          */
13678         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
13679                 num = i40e_pf_calc_configured_queues_num(pf);
13680         else
13681                 num = pf->dev_data->nb_rx_queues;
13682
13683         num = RTE_MIN(num, num_rxq);
13684         PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
13685                         num);
13686
13687         if (num == 0) {
13688                 PMD_DRV_LOG(ERR,
13689                         "No PF queues are configured to enable RSS for port %u",
13690                         pf->dev_data->port_id);
13691                 return -ENOTSUP;
13692         }
13693
13694         /* Fill in redirection table */
13695         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
13696                 if (j == num)
13697                         j = 0;
13698                 lut = (lut << 8) | (queue[j] & ((0x1 <<
13699                         hw->func_caps.rss_table_entry_width) - 1));
13700                 if ((i & 3) == 3)
13701                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
13702         }
13703
13704         rss_info->conf.queue_num = 0;
13705         rss_info->conf.queue = NULL;
13706
13707         return 0;
13708 }
13709
13710 int
13711 i40e_config_rss_filter(struct i40e_pf *pf,
13712                 struct i40e_rte_flow_rss_conf *conf, bool add)
13713 {
13714         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
13715         struct rte_flow_action_rss update_conf = rss_info->conf;
13716         int ret = 0;
13717
13718         if (add) {
13719                 if (conf->conf.queue_num) {
13720                         /* Configure RSS queue region */
13721                         ret = i40e_rss_config_queue_region(pf, conf);
13722                         if (ret)
13723                                 return ret;
13724
13725                         update_conf.queue_num = conf->conf.queue_num;
13726                         update_conf.queue = conf->conf.queue;
13727                 } else if (conf->conf.func ==
13728                            RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
13729                         /* Configure hash function */
13730                         ret = i40e_rss_config_hash_function(pf, conf);
13731                         if (ret)
13732                                 return ret;
13733
13734                         update_conf.func = conf->conf.func;
13735                 } else {
13736                         /* Configure hash enable and input set */
13737                         ret = i40e_rss_enable_hash(pf, conf);
13738                         if (ret)
13739                                 return ret;
13740
13741                         update_conf.types |= conf->conf.types;
13742                         update_conf.key = conf->conf.key;
13743                         update_conf.key_len = conf->conf.key_len;
13744                 }
13745
13746                 /* Update RSS info in pf */
13747                 if (i40e_rss_conf_init(rss_info, &update_conf))
13748                         return -EINVAL;
13749         } else {
13750                 if (!conf->valid)
13751                         return 0;
13752
13753                 if (conf->conf.queue_num)
13754                         i40e_rss_clear_queue_region(pf);
13755                 else if (conf->conf.func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)
13756                         i40e_rss_clear_hash_function(pf, conf);
13757                 else
13758                         i40e_rss_disable_hash(pf, conf);
13759         }
13760
13761         return 0;
13762 }
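/*
 * Usage sketch (illustrative): callers use this entry point in both
 * directions, e.g.:
 *
 *   ret = i40e_config_rss_filter(pf, &rss_conf, true);   (program)
 *   ...
 *   ret = i40e_config_rss_filter(pf, &rss_conf, false);  (tear down)
 *
 * On add, exactly one of the three paths runs: queue-region setup when
 * queue_num is set, hash-function selection for SIMPLE_XOR, or hash
 * enable plus input-set programming otherwise.
 */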
13763
13764 RTE_LOG_REGISTER(i40e_logtype_init, pmd.net.i40e.init, NOTICE);
13765 RTE_LOG_REGISTER(i40e_logtype_driver, pmd.net.i40e.driver, NOTICE);
13766 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
13767 RTE_LOG_REGISTER(i40e_logtype_rx, pmd.net.i40e.rx, DEBUG);
13768 #endif
13769 #ifdef RTE_LIBRTE_I40E_DEBUG_TX
13770 RTE_LOG_REGISTER(i40e_logtype_tx, pmd.net.i40e.tx, DEBUG);
13771 #endif
13772 #ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
13773 RTE_LOG_REGISTER(i40e_logtype_tx_free, pmd.net.i40e.tx_free, DEBUG);
13774 #endif
13775
13776 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
13777                               ETH_I40E_FLOATING_VEB_ARG "=1"
13778                               ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
13779                               ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
13780                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
13781                               ETH_I40E_USE_LATEST_VEC "=0|1");