net/i40e: fix missing mbuf fast free offload
drivers/net/i40e/i40e_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
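
/*
 * Note (added for clarity): 0xF2000 is 968 * 1024, so both watermarks
 * default to the full 968 KB Rx packet buffer size (I40E_RXPBSIZE)
 * expressed in kilobytes.
 */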

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
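
/*
 * Illustrative note (not part of the upstream code): this mask is the
 * value a caller would write to the PFINT_ICR0_ENA register to unmask
 * the listed interrupt causes, e.g.:
 *
 *   I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
 */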

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * The following values are used to program un-exposed registers,
 * as suggested by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
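
/*
 * Illustrative sketch (not part of the upstream code): how the PCI
 * offsets and masks above would typically be used with the rte_pci
 * config-space helpers to check for, and then enable, PCIe Extended
 * Tag. Kept under "#if 0" as a hedged example only.
 */
#if 0
static void example_enable_extended_tag(struct rte_pci_device *pci_dev)
{
	uint32_t buf = 0;

	/* Read the capability register and test the Extended Tag bit */
	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CAP_REG) < 0 ||
	    !(buf & PCI_DEV_CAP_EXT_TAG_MASK))
		return;

	/* Set the enable bit in the control register and write it back */
	buf = 0;
	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CTRL_REG) < 0)
		return;
	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
	rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
}
#endif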

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
				  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.get_module_info              = i40e_get_module_info,
	.get_module_eeprom            = i40e_get_module_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))
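
/*
 * Illustrative note (not part of the upstream code): each entry above
 * pairs an xstats display name with the byte offset of its counter in
 * struct i40e_eth_stats, so values can be fetched generically, e.g.:
 *
 *   char *base = (char *)&vsi->eth_stats;
 *   uint64_t val =
 *       *(uint64_t *)(base + rte_i40e_stats_strings[i].offset);
 */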

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))
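
/*
 * Note (added for clarity): the rx/tx priority xstats above are reported
 * once per priority level, since the corresponding fields in struct
 * i40e_hw_port_stats (priority_xon_rx[8] and friends) are per-priority
 * arrays; the offsets here are the base of each array.
 */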

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	int i, retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct i40e_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_i40e_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* probe VF representor ports */
	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
		pci_dev->device.name);

	if (pf_ethdev == NULL)
		return -ENODEV;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct i40e_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
				pf_ethdev->data->dev_private)->switch_domain_id,
			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
				pf_ethdev->data->dev_private)
		};

		/* representor port name: net_<pci bdf>_representor_<vf id> */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name, eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct i40e_vf_representor), NULL, NULL,
			i40e_vf_representor_init, &representor);

		if (retval)
			PMD_DRV_LOG(ERR, "failed to create i40e vf "
				"representor %s.", name);
	}

	return 0;
}
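
/*
 * Usage note (illustrative, not part of the upstream code): representor
 * ports are requested through the standard "representor" devarg parsed
 * by rte_eth_devargs_parse() above, e.g.:
 *
 *   -w 0000:02:00.0,representor=[0-2]
 *
 * which creates ports named net_0000:02:00.0_representor_0 .. _2.
 */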

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
	else
		return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
	i40e_write_rx_ctl(hw, reg_addr, reg_val);
	PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
		    "with value 0x%08x",
		    reg_addr, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing the packet type of QinQ.
	 * This should be removed from the code once a proper
	 * configuration API is added, to avoid configuration conflicts
	 * between ports of the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
	i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	/* Disable automask so the INTENA flag is not auto-cleared on interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

	/* If multi-driver support is enabled, the PF will only use INT0. */
	if (!pf->support_multi_driver)
		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
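
/*
 * Example (derived from the parser above): "floating_veb_list=0;3-5"
 * marks VFs 0, 3, 4 and 5 as attached to the floating VEB. Entries are
 * separated by ';' and a '-' denotes an inclusive range.
 */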

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * will attach to the legacy VEB first; they are then moved
	 * to the floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
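
/*
 * Usage note (illustrative, not part of the upstream code): both keys
 * are passed as PCI devargs, e.g.:
 *
 *   -w 0000:01:00.0,enable_floating_veb=1,floating_veb_list=0;3-5
 *
 * (the ';' may need shell quoting on the command line).
 */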

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}
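
/*
 * Illustrative sketch (not part of the upstream code): the rte_hash table
 * created above maps a filter's input key to an index, and hash_map[]
 * turns that index back into the filter object, e.g.:
 *
 *   int idx = rte_hash_add_key(ethertype_rule->hash_table,
 *                              &filter->input);
 *   if (idx >= 0)
 *           ethertype_rule->hash_map[idx] = filter;
 *
 * This mirrors how i40e_sw_ethertype_filter_insert() (declared above)
 * uses these structures; the tunnel and fdir lists below follow the
 * same pattern.
 */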

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));
}

#define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"

static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
			       const char *value,
			       void *opaque)
{
	struct i40e_pf *pf;
	unsigned long support_multi_driver;
	char *end;

	pf = (struct i40e_pf *)opaque;

	errno = 0;
	support_multi_driver = strtoul(value, &end, 10);
	if (errno != 0 || end == value || *end != 0) {
		PMD_DRV_LOG(WARNING, "Wrong global configuration");
		return -(EINVAL);
	}

	if (support_multi_driver == 1 || support_multi_driver == 0)
		pf->support_multi_driver = (bool)support_multi_driver;
	else
		PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
			    "enable global configuration by default.",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);
	return 0;
}

static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	static const char *const valid_keys[] = {
		ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
	struct rte_kvargs *kvlist;

	/* Enable global configuration by default */
	pf->support_multi_driver = false;

	if (!dev->device->devargs)
		return 0;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;

	if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
1144                 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
1145                             "the first invalid or last valid one is used !",
1146                             ETH_I40E_SUPPORT_MULTI_DRIVER);

	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
			       i40e_parse_multi_drv_handler, pf) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}

	rte_kvargs_free(kvlist);
	return 0;
}
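
/*
 * Usage note (illustrative, not part of the upstream code): multi-driver
 * mode is requested per port, e.g.:
 *
 *   -w 0000:01:00.0,support-multi-driver=1
 *
 * which makes the PMD avoid programming global registers that may be
 * shared with other i40e drivers on the same NIC (see the
 * !pf->support_multi_driver checks in eth_i40e_dev_init() below).
 */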

static int
eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;
	dev->tx_pkt_prepare = i40e_prep_pkts;

	/* For secondary processes we don't initialise any further, as the
	 * primary has already done this work. Only check whether we need a
	 * different RX/TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	i40e_set_default_ptype_table(dev);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	intr_handle = &pci_dev->intr_handle;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR,
			"Hardware is not available, as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Check if need to support multi-driver */
	i40e_support_multi_driver(dev);

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(dev);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	i40e_config_automask(pf);

	i40e_set_default_pctype_table(dev);

1238         /*
1239          * To work around the NVM issue, initialize the registers
1240          * for the QinQ packet type by software.
1241          * This should be removed once the issue is fixed in the NVM.
1242          */
1243         if (!pf->support_multi_driver)
1244                 i40e_GLQF_reg_init(hw);
1245
1246         /* Initialize the input set for filters (hash and fd) to default value */
1247         i40e_filter_input_set_init(pf);
1248
1249         /* Initialize the parameters for adminq */
1250         i40e_init_adminq_parameter(hw);
1251         ret = i40e_init_adminq(hw);
1252         if (ret != I40E_SUCCESS) {
1253                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1254                 return -EIO;
1255         }
1256         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1257                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1258                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1259                      ((hw->nvm.version >> 12) & 0xf),
1260                      ((hw->nvm.version >> 4) & 0xff),
1261                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
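        /* The NVM version word decoded above packs the version as bits 15:12
         * (major), 11:4 (minor) and 3:0 (patch), matching the
         * "NVM %02d.%02d.%02d" format string.
         */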
1262
1263         /* initialise the L3_MAP register */
1264         if (!pf->support_multi_driver) {
1265                 ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1266                                                    0x00000028,  NULL);
1267                 if (ret)
1268                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1269                                      ret);
1270                 PMD_INIT_LOG(DEBUG,
1271                              "Global register 0x%08x is changed to 0x28",
1272                              I40E_GLQF_L3_MAP(40));
1273                 i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
1274         }
1275
1276         /* A special FW version is needed to support floating VEB */
1277         config_floating_veb(dev);
1278         /* Clear PXE mode */
1279         i40e_clear_pxe_mode(hw);
1280         i40e_dev_sync_phy_type(hw);
1281
1282         /*
1283          * On X710, performance numbers are far from expectations on recent
1284          * firmware versions, and the fix for this issue may not be integrated
1285          * in the following firmware version. So a workaround in the software
1286          * driver is needed: it modifies the initial values of 3 internal-only
1287          * registers. Note that the workaround can be removed once this is
1288          * fixed in firmware in the future.
1289          */
1290         i40e_configure_registers(hw);
1291
1292         /* Get hw capabilities */
1293         ret = i40e_get_cap(hw);
1294         if (ret != I40E_SUCCESS) {
1295                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1296                 goto err_get_capabilities;
1297         }
1298
1299         /* Initialize parameters for PF */
1300         ret = i40e_pf_parameter_init(dev);
1301         if (ret != 0) {
1302                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1303                 goto err_parameter_init;
1304         }
1305
1306         /* Initialize the queue management */
1307         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1308         if (ret < 0) {
1309                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1310                 goto err_qp_pool_init;
1311         }
1312         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1313                                 hw->func_caps.num_msix_vectors - 1);
1314         if (ret < 0) {
1315                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1316                 goto err_msix_pool_init;
1317         }
1318
1319         /* Initialize lan hmc */
1320         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1321                                 hw->func_caps.num_rx_qp, 0, 0);
1322         if (ret != I40E_SUCCESS) {
1323                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1324                 goto err_init_lan_hmc;
1325         }
1326
1327         /* Configure lan hmc */
1328         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1329         if (ret != I40E_SUCCESS) {
1330                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1331                 goto err_configure_lan_hmc;
1332         }
1333
1334         /* Get and check the mac address */
1335         i40e_get_mac_addr(hw, hw->mac.addr);
1336         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1337                 PMD_INIT_LOG(ERR, "mac address is not valid");
1338                 ret = -EIO;
1339                 goto err_get_mac_addr;
1340         }
1341         /* Copy the permanent MAC address */
1342         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1343                         (struct ether_addr *) hw->mac.perm_addr);
1344
1345         /* Disable flow control */
1346         hw->fc.requested_mode = I40E_FC_NONE;
1347         i40e_set_fc(hw, &aq_fail, TRUE);
1348
1349         /* Set the global registers with default ether type value */
1350         if (!pf->support_multi_driver) {
1351                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1352                                          ETHER_TYPE_VLAN);
1353                 if (ret != I40E_SUCCESS) {
1354                         PMD_INIT_LOG(ERR,
1355                                      "Failed to set the default outer "
1356                                      "VLAN ether type");
1357                         goto err_setup_pf_switch;
1358                 }
1359         }
1360
1361         /* PF setup, which includes VSI setup */
1362         ret = i40e_pf_setup(pf);
1363         if (ret) {
1364                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1365                 goto err_setup_pf_switch;
1366         }
1367
1368         /* reset all stats of the device, including pf and main vsi */
1369         i40e_dev_stats_reset(dev);
1370
1371         vsi = pf->main_vsi;
1372
1373         /* Disable double vlan by default */
1374         i40e_vsi_config_double_vlan(vsi, FALSE);
1375
1376         /* Disable S-TAG identification when floating_veb is disabled */
1377         if (!pf->floating_veb) {
1378                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1379                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1380                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1381                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1382                 }
1383         }
1384
1385         if (!vsi->max_macaddrs)
1386                 len = ETHER_ADDR_LEN;
1387         else
1388                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1389
1390         /* Should be after VSI initialized */
1391         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1392         if (!dev->data->mac_addrs) {
1393                 PMD_INIT_LOG(ERR,
1394                         "Failed to allocate memory for storing MAC addresses");
1395                 goto err_mac_alloc;
1396         }
1397         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1398                                         &dev->data->mac_addrs[0]);
1399
1400         /* Init dcb to sw mode by default */
1401         ret = i40e_dcb_init_configure(dev, TRUE);
1402         if (ret != I40E_SUCCESS) {
1403                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1404                 pf->flags &= ~I40E_FLAG_DCB;
1405         }
1406         /* Update HW struct after DCB configuration */
1407         i40e_get_cap(hw);
1408
1409         /* initialize pf host driver to setup SRIOV resource if applicable */
1410         i40e_pf_host_init(dev);
1411
1412         /* register callback func to eal lib */
1413         rte_intr_callback_register(intr_handle,
1414                                    i40e_dev_interrupt_handler, dev);
1415
1416         /* configure and enable device interrupt */
1417         i40e_pf_config_irq0(hw, TRUE);
1418         i40e_pf_enable_irq0(hw);
1419
1420         /* enable uio intr after callback register */
1421         rte_intr_enable(intr_handle);
1422
1423         /* By default disable flexible payload in global configuration */
1424         if (!pf->support_multi_driver)
1425                 i40e_flex_payload_reg_set_default(hw);
1426
1427         /*
1428          * Add an ethertype filter to drop all flow control frames transmitted
1429          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1430          * frames to wire.
1431          */
1432         i40e_add_tx_flow_control_drop_filter(pf);
1433
1434         /* Set the max frame size to 0x2600 by default,
1435          * in case other drivers changed the default value.
1436          */
1437         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1438
1439         /* initialize mirror rule list */
1440         TAILQ_INIT(&pf->mirror_list);
1441
1442         /* initialize Traffic Manager configuration */
1443         i40e_tm_conf_init(dev);
1444
1445         /* Initialize customized information */
1446         i40e_init_customized_info(pf);
1447
1448         ret = i40e_init_ethtype_filter_list(dev);
1449         if (ret < 0)
1450                 goto err_init_ethtype_filter_list;
1451         ret = i40e_init_tunnel_filter_list(dev);
1452         if (ret < 0)
1453                 goto err_init_tunnel_filter_list;
1454         ret = i40e_init_fdir_filter_list(dev);
1455         if (ret < 0)
1456                 goto err_init_fdir_filter_list;
1457
1458         /* initialize queue region configuration */
1459         i40e_init_queue_region_conf(dev);
1460
1461         /* initialize rss configuration from rte_flow */
1462         memset(&pf->rss_info, 0,
1463                 sizeof(struct i40e_rte_flow_rss_conf));
1464
1465         return 0;
1466
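/*
 * Error unwind: each label below releases the resources acquired before the
 * corresponding failure point, in reverse order of initialization.
 */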
1467 err_init_fdir_filter_list:
1468         rte_free(pf->tunnel.hash_table);
1469         rte_free(pf->tunnel.hash_map);
1470 err_init_tunnel_filter_list:
1471         rte_free(pf->ethertype.hash_table);
1472         rte_free(pf->ethertype.hash_map);
1473 err_init_ethtype_filter_list:
1474         rte_free(dev->data->mac_addrs);
1475 err_mac_alloc:
1476         i40e_vsi_release(pf->main_vsi);
1477 err_setup_pf_switch:
1478 err_get_mac_addr:
1479 err_configure_lan_hmc:
1480         (void)i40e_shutdown_lan_hmc(hw);
1481 err_init_lan_hmc:
1482         i40e_res_pool_destroy(&pf->msix_pool);
1483 err_msix_pool_init:
1484         i40e_res_pool_destroy(&pf->qp_pool);
1485 err_qp_pool_init:
1486 err_parameter_init:
1487 err_get_capabilities:
1488         (void)i40e_shutdown_adminq(hw);
1489
1490         return ret;
1491 }
1492
1493 static void
1494 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1495 {
1496         struct i40e_ethertype_filter *p_ethertype;
1497         struct i40e_ethertype_rule *ethertype_rule;
1498
1499         ethertype_rule = &pf->ethertype;
1500         /* Remove all ethertype filter rules and hash */
1501         if (ethertype_rule->hash_map)
1502                 rte_free(ethertype_rule->hash_map);
1503         if (ethertype_rule->hash_table)
1504                 rte_hash_free(ethertype_rule->hash_table);
1505
1506         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1507                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1508                              p_ethertype, rules);
1509                 rte_free(p_ethertype);
1510         }
1511 }
1512
1513 static void
1514 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1515 {
1516         struct i40e_tunnel_filter *p_tunnel;
1517         struct i40e_tunnel_rule *tunnel_rule;
1518
1519         tunnel_rule = &pf->tunnel;
1520         /* Remove all tunnel filter rules and hash */
1521         if (tunnel_rule->hash_map)
1522                 rte_free(tunnel_rule->hash_map);
1523         if (tunnel_rule->hash_table)
1524                 rte_hash_free(tunnel_rule->hash_table);
1525
1526         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1527                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1528                 rte_free(p_tunnel);
1529         }
1530 }
1531
1532 static void
1533 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1534 {
1535         struct i40e_fdir_filter *p_fdir;
1536         struct i40e_fdir_info *fdir_info;
1537
1538         fdir_info = &pf->fdir;
1539         /* Remove all flow director rules and hash */
1540         if (fdir_info->hash_map)
1541                 rte_free(fdir_info->hash_map);
1542         if (fdir_info->hash_table)
1543                 rte_hash_free(fdir_info->hash_table);
1544
1545         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1546                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1547                 rte_free(p_fdir);
1548         }
1549 }
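
/* The three removal helpers above share one teardown pattern: free the hash
 * map and hash table first, then drain the TAILQ, freeing each rule as it is
 * unlinked. rte_free() tolerates NULL pointers, so the NULL checks here are
 * defensive.
 */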
1550
1551 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1552 {
1553         /*
1554          * Disable by default flexible payload
1555          * for corresponding L2/L3/L4 layers.
1556          */
1557         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1558         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1559         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1560         i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
1561 }
1562
1563 static int
1564 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1565 {
1566         struct i40e_pf *pf;
1567         struct rte_pci_device *pci_dev;
1568         struct rte_intr_handle *intr_handle;
1569         struct i40e_hw *hw;
1570         struct i40e_filter_control_settings settings;
1571         struct rte_flow *p_flow;
1572         int ret;
1573         uint8_t aq_fail = 0;
1574         int retries = 0;
1575
1576         PMD_INIT_FUNC_TRACE();
1577
1578         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1579                 return 0;
1580
1581         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1582         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1583         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1584         intr_handle = &pci_dev->intr_handle;
1585
1586         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
1587         if (ret)
1588                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
1589
1590         if (hw->adapter_stopped == 0)
1591                 i40e_dev_close(dev);
1592
1593         dev->dev_ops = NULL;
1594         dev->rx_pkt_burst = NULL;
1595         dev->tx_pkt_burst = NULL;
1596
1597         /* Clear PXE mode */
1598         i40e_clear_pxe_mode(hw);
1599
1600         /* Unconfigure filter control */
1601         memset(&settings, 0, sizeof(settings));
1602         ret = i40e_set_filter_control(hw, &settings);
1603         if (ret)
1604                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1605                                         ret);
1606
1607         /* Disable flow control */
1608         hw->fc.requested_mode = I40E_FC_NONE;
1609         i40e_set_fc(hw, &aq_fail, TRUE);
1610
1611         /* uninitialize pf host driver */
1612         i40e_pf_host_uninit(dev);
1613
1614         rte_free(dev->data->mac_addrs);
1615         dev->data->mac_addrs = NULL;
1616
1617         /* disable uio intr before callback unregister */
1618         rte_intr_disable(intr_handle);
1619
1620         /* unregister callback func to eal lib */
1621         do {
1622                 ret = rte_intr_callback_unregister(intr_handle,
1623                                 i40e_dev_interrupt_handler, dev);
1624                 if (ret >= 0) {
1625                         break;
1626                 } else if (ret != -EAGAIN) {
1627                         PMD_INIT_LOG(ERR,
1628                                  "intr callback unregister failed: %d",
1629                                  ret);
1630                         return ret;
1631                 }
1632                 i40e_msec_delay(500);
1633         } while (retries++ < 5);
1634
1635         i40e_rm_ethtype_filter_list(pf);
1636         i40e_rm_tunnel_filter_list(pf);
1637         i40e_rm_fdir_filter_list(pf);
1638
1639         /* Remove all flows */
1640         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1641                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1642                 rte_free(p_flow);
1643         }
1644
1645         /* Remove all Traffic Manager configuration */
1646         i40e_tm_conf_uninit(dev);
1647
1648         return 0;
1649 }
1650
1651 static int
1652 i40e_dev_configure(struct rte_eth_dev *dev)
1653 {
1654         struct i40e_adapter *ad =
1655                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1657         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1658         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1659         int i, ret;
1660
1661         ret = i40e_dev_sync_phy_type(hw);
1662         if (ret)
1663                 return ret;
1664
1665         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1666          * allocation or vector Rx preconditions, we will reset it.
1667          */
1668         ad->rx_bulk_alloc_allowed = true;
1669         ad->rx_vec_allowed = true;
1670         ad->tx_simple_allowed = true;
1671         ad->tx_vec_allowed = true;
1672
1673         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1674                 ret = i40e_fdir_setup(pf);
1675                 if (ret != I40E_SUCCESS) {
1676                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1677                         return -ENOTSUP;
1678                 }
1679                 ret = i40e_fdir_configure(dev);
1680                 if (ret < 0) {
1681                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1682                         goto err;
1683                 }
1684         } else
1685                 i40e_fdir_teardown(pf);
1686
1687         ret = i40e_dev_init_vlan(dev);
1688         if (ret < 0)
1689                 goto err;
1690
1691         /* VMDQ setup.
1692          *  VMDQ setting needs to move out of i40e_pf_config_mq_rx() as
1693          *  VMDQ and RSS settings have different requirements.
1694          *  The general PMD call sequence is NIC init, configure,
1695          *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup(), the
1696          *  driver looks up the VSI that a specific queue belongs to if VMDQ
1697          *  is applicable, so VMDQ setting has to be done before
1698          *  rx/tx_queue_setup() and this function is a good place for it.
1699          *  For RSS setting, the driver calculates the actual configured Rx
1700          *  queue number, which is only available after rx_queue_setup(),
1701          *  so dev_start() is a good place for RSS setup.
1702          */
1703         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1704                 ret = i40e_vmdq_setup(dev);
1705                 if (ret)
1706                         goto err;
1707         }
1708
1709         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1710                 ret = i40e_dcb_setup(dev);
1711                 if (ret) {
1712                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1713                         goto err_dcb;
1714                 }
1715         }
1716
1717         TAILQ_INIT(&pf->flow_list);
1718
1719         return 0;
1720
1721 err_dcb:
1722         /* need to release vmdq resource if exists */
1723         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1724                 i40e_vsi_release(pf->vmdq[i].vsi);
1725                 pf->vmdq[i].vsi = NULL;
1726         }
1727         rte_free(pf->vmdq);
1728         pf->vmdq = NULL;
1729 err:
1730         /* need to release fdir resource if exists */
1731         i40e_fdir_teardown(pf);
1732         return ret;
1733 }
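
/* Application-side sketch (assumption, not part of this driver): perfect-match
 * flow director is selected through the device configuration consumed above,
 * e.g.:
 *
 *     struct rte_eth_conf conf = { .fdir_conf.mode = RTE_FDIR_MODE_PERFECT };
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * Any other mode makes i40e_dev_configure() tear down FDIR resources instead.
 */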
1734
1735 void
1736 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1737 {
1738         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1739         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1740         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1741         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1742         uint16_t msix_vect = vsi->msix_intr;
1743         uint16_t i;
1744
1745         for (i = 0; i < vsi->nb_qps; i++) {
1746                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1747                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1748                 rte_wmb();
1749         }
1750
1751         if (vsi->type != I40E_VSI_SRIOV) {
1752                 if (!rte_intr_allow_others(intr_handle)) {
1753                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1754                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1755                         I40E_WRITE_REG(hw,
1756                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1757                                        0);
1758                 } else {
1759                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1760                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1761                         I40E_WRITE_REG(hw,
1762                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1763                                                        msix_vect - 1), 0);
1764                 }
1765         } else {
1766                 uint32_t reg;
1767                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1768                         vsi->user_param + (msix_vect - 1);
1769
1770                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1771                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1772         }
1773         I40E_WRITE_FLUSH(hw);
1774 }
1775
1776 static void
1777 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1778                        int base_queue, int nb_queue,
1779                        uint16_t itr_idx)
1780 {
1781         int i;
1782         uint32_t val;
1783         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1784         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1785
1786         /* Bind all RX queues to allocated MSIX interrupt */
1787         for (i = 0; i < nb_queue; i++) {
1788                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1789                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1790                         ((base_queue + i + 1) <<
1791                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1792                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1793                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1794
1795                 if (i == nb_queue - 1)
1796                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1797                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1798         }
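
        /* The writes above chain the queues into a singly linked list via
         * NEXTQ_INDX: each queue points at the next one, and the last entry
         * is terminated with I40E_QINT_RQCTL_NEXTQ_INDX_MASK. The head of
         * the list is written to a link-list register below.
         */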
1799
1800         /* Write first RX queue to Link list register as the head element */
1801         if (vsi->type != I40E_VSI_SRIOV) {
1802                 uint16_t interval =
1803                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
1804                                                pf->support_multi_driver);
1805
1806                 if (msix_vect == I40E_MISC_VEC_ID) {
1807                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1808                                        (base_queue <<
1809                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1810                                        (0x0 <<
1811                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1812                         I40E_WRITE_REG(hw,
1813                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1814                                        interval);
1815                 } else {
1816                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1817                                        (base_queue <<
1818                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1819                                        (0x0 <<
1820                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1821                         I40E_WRITE_REG(hw,
1822                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1823                                                        msix_vect - 1),
1824                                        interval);
1825                 }
1826         } else {
1827                 uint32_t reg;
1828
1829                 if (msix_vect == I40E_MISC_VEC_ID) {
1830                         I40E_WRITE_REG(hw,
1831                                        I40E_VPINT_LNKLST0(vsi->user_param),
1832                                        (base_queue <<
1833                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1834                                        (0x0 <<
1835                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1836                 } else {
1837                         /* num_msix_vectors_vf includes irq0, so subtract it */
1838                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1839                                 vsi->user_param + (msix_vect - 1);
1840
1841                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1842                                        (base_queue <<
1843                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1844                                        (0x0 <<
1845                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1846                 }
1847         }
1848
1849         I40E_WRITE_FLUSH(hw);
1850 }
1851
1852 void
1853 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1854 {
1855         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1856         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1857         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1858         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1859         uint16_t msix_vect = vsi->msix_intr;
1860         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1861         uint16_t queue_idx = 0;
1862         int record = 0;
1863         int i;
1864
1865         for (i = 0; i < vsi->nb_qps; i++) {
1866                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1867                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1868         }
1869
1870         /* VF bind interrupt */
1871         if (vsi->type == I40E_VSI_SRIOV) {
1872                 __vsi_queues_bind_intr(vsi, msix_vect,
1873                                        vsi->base_queue, vsi->nb_qps,
1874                                        itr_idx);
1875                 return;
1876         }
1877
1878         /* PF & VMDq bind interrupt */
1879         if (rte_intr_dp_is_en(intr_handle)) {
1880                 if (vsi->type == I40E_VSI_MAIN) {
1881                         queue_idx = 0;
1882                         record = 1;
1883                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1884                         struct i40e_vsi *main_vsi =
1885                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1886                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1887                         record = 1;
1888                 }
1889         }
1890
1891         for (i = 0; i < vsi->nb_used_qps; i++) {
1892                 if (nb_msix <= 1) {
1893                         if (!rte_intr_allow_others(intr_handle))
1894                                 /* allow sharing MISC_VEC_ID */
1895                                 msix_vect = I40E_MISC_VEC_ID;
1896
1897                         /* not enough msix_vect, map all remaining queues to one */
1898                         __vsi_queues_bind_intr(vsi, msix_vect,
1899                                                vsi->base_queue + i,
1900                                                vsi->nb_used_qps - i,
1901                                                itr_idx);
1902                         for (; !!record && i < vsi->nb_used_qps; i++)
1903                                 intr_handle->intr_vec[queue_idx + i] =
1904                                         msix_vect;
1905                         break;
1906                 }
1907                 /* 1:1 queue/msix_vect mapping */
1908                 __vsi_queues_bind_intr(vsi, msix_vect,
1909                                        vsi->base_queue + i, 1,
1910                                        itr_idx);
1911                 if (!!record)
1912                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1913
1914                 msix_vect++;
1915                 nb_msix--;
1916         }
1917 }
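
/* Mapping summary for i40e_vsi_queues_bind_intr(): with enough MSI-X vectors,
 * queues and vectors map 1:1; otherwise all remaining queues are bound to a
 * single vector, which falls back to the shared I40E_MISC_VEC_ID when other
 * interrupt vectors are not allowed.
 */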
1918
1919 static void
1920 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1921 {
1922         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1923         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1924         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1925         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1926         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1927         uint16_t msix_intr, i;
1928
1929         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1930                 for (i = 0; i < vsi->nb_msix; i++) {
1931                         msix_intr = vsi->msix_intr + i;
1932                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1933                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1934                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1935                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1936                 }
1937         else
1938                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1939                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1940                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1941                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1942
1943         I40E_WRITE_FLUSH(hw);
1944 }
1945
1946 static void
1947 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1948 {
1949         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1950         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1951         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1952         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1953         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1954         uint16_t msix_intr, i;
1955
1956         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1957                 for (i = 0; i < vsi->nb_msix; i++) {
1958                         msix_intr = vsi->msix_intr + i;
1959                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1960                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1961                 }
1962         else
1963                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1964                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1965
1966         I40E_WRITE_FLUSH(hw);
1967 }
1968
1969 static inline uint8_t
1970 i40e_parse_link_speeds(uint16_t link_speeds)
1971 {
1972         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1973
1974         if (link_speeds & ETH_LINK_SPEED_40G)
1975                 link_speed |= I40E_LINK_SPEED_40GB;
1976         if (link_speeds & ETH_LINK_SPEED_25G)
1977                 link_speed |= I40E_LINK_SPEED_25GB;
1978         if (link_speeds & ETH_LINK_SPEED_20G)
1979                 link_speed |= I40E_LINK_SPEED_20GB;
1980         if (link_speeds & ETH_LINK_SPEED_10G)
1981                 link_speed |= I40E_LINK_SPEED_10GB;
1982         if (link_speeds & ETH_LINK_SPEED_1G)
1983                 link_speed |= I40E_LINK_SPEED_1GB;
1984         if (link_speeds & ETH_LINK_SPEED_100M)
1985                 link_speed |= I40E_LINK_SPEED_100MB;
1986
1987         return link_speed;
1988 }
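
/* Worked example: link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G maps
 * to I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB; a mask with no recognized
 * bit leaves the result at I40E_LINK_SPEED_UNKNOWN.
 */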
1989
1990 static int
1991 i40e_phy_conf_link(struct i40e_hw *hw,
1992                    uint8_t abilities,
1993                    uint8_t force_speed,
1994                    bool is_up)
1995 {
1996         enum i40e_status_code status;
1997         struct i40e_aq_get_phy_abilities_resp phy_ab;
1998         struct i40e_aq_set_phy_config phy_conf;
1999         enum i40e_aq_phy_type cnt;
2000         uint32_t phy_type_mask = 0;
2001
2002         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2003                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2004                         I40E_AQ_PHY_FLAG_LOW_POWER;
2006         const uint8_t advt = I40E_LINK_SPEED_40GB |
2007                         I40E_LINK_SPEED_25GB |
2008                         I40E_LINK_SPEED_10GB |
2009                         I40E_LINK_SPEED_1GB |
2010                         I40E_LINK_SPEED_100MB;
2011         int ret = -ENOTSUP;
2012
2014         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2015                                               NULL);
2016         if (status)
2017                 return ret;
2018
2019         /* If link already up, no need to set up again */
2020         if (is_up && phy_ab.phy_type != 0)
2021                 return I40E_SUCCESS;
2022
2023         memset(&phy_conf, 0, sizeof(phy_conf));
2024
2025         /* bits 0-2 use the values from get_phy_abilities_resp */
2026         abilities &= ~mask;
2027         abilities |= phy_ab.abilities & mask;
2028
2029         /* update abilities and speed */
2030         if (abilities & I40E_AQ_PHY_AN_ENABLED)
2031                 phy_conf.link_speed = advt;
2032         else
2033                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
2034
2035         phy_conf.abilities = abilities;
2036
2039         /* To enable link, phy_type mask needs to include each type */
2040         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
2041                 phy_type_mask |= 1 << cnt;
2042
2043         /* use get_phy_abilities_resp value for the rest */
2044         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2045         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2046                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2047                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2048         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2049         phy_conf.eee_capability = phy_ab.eee_capability;
2050         phy_conf.eeer = phy_ab.eeer_val;
2051         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2052
2053         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2054                     phy_ab.abilities, phy_ab.link_speed);
2055         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2056                     phy_conf.abilities, phy_conf.link_speed);
2057
2058         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2059         if (status)
2060                 return ret;
2061
2062         return I40E_SUCCESS;
2063 }
2064
2065 static int
2066 i40e_apply_link_speed(struct rte_eth_dev *dev)
2067 {
2068         uint8_t speed;
2069         uint8_t abilities = 0;
2070         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2071         struct rte_eth_conf *conf = &dev->data->dev_conf;
2072
2073         speed = i40e_parse_link_speeds(conf->link_speeds);
2074         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2075         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
2076                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2077         abilities |= I40E_AQ_PHY_LINK_ENABLED;
2078
2079         return i40e_phy_conf_link(hw, abilities, speed, true);
2080 }
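
/* Note: when dev_conf.link_speeds does not contain ETH_LINK_SPEED_FIXED
 * (enforced in i40e_dev_start()), autonegotiation is enabled above and
 * i40e_phy_conf_link() advertises the full speed set rather than the parsed
 * one.
 */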
2081
2082 static int
2083 i40e_dev_start(struct rte_eth_dev *dev)
2084 {
2085         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2086         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2087         struct i40e_vsi *main_vsi = pf->main_vsi;
2088         int ret, i;
2089         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2090         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2091         uint32_t intr_vector = 0;
2092         struct i40e_vsi *vsi;
2093
2094         hw->adapter_stopped = 0;
2095
2096         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2097                 PMD_INIT_LOG(ERR,
2098                 "Invalid link_speeds for port %u, autonegotiation disabled",
2099                               dev->data->port_id);
2100                 return -EINVAL;
2101         }
2102
2103         rte_intr_disable(intr_handle);
2104
2105         if ((rte_intr_cap_multiple(intr_handle) ||
2106              !RTE_ETH_DEV_SRIOV(dev).active) &&
2107             dev->data->dev_conf.intr_conf.rxq != 0) {
2108                 intr_vector = dev->data->nb_rx_queues;
2109                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2110                 if (ret)
2111                         return ret;
2112         }
2113
2114         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2115                 intr_handle->intr_vec =
2116                         rte_zmalloc("intr_vec",
2117                                     dev->data->nb_rx_queues * sizeof(int),
2118                                     0);
2119                 if (!intr_handle->intr_vec) {
2120                         PMD_INIT_LOG(ERR,
2121                                 "Failed to allocate %d rx_queues intr_vec",
2122                                 dev->data->nb_rx_queues);
2123                         return -ENOMEM;
2124                 }
2125         }
2126
2127         /* Initialize VSI */
2128         ret = i40e_dev_rxtx_init(pf);
2129         if (ret != I40E_SUCCESS) {
2130                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2131                 goto err_up;
2132         }
2133
2134         /* Map queues with MSIX interrupt */
2135         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2136                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2137         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2138         i40e_vsi_enable_queues_intr(main_vsi);
2139
2140         /* Map VMDQ VSI queues with MSIX interrupt */
2141         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2142                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2143                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2144                                           I40E_ITR_INDEX_DEFAULT);
2145                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2146         }
2147
2148         /* enable FDIR MSIX interrupt */
2149         if (pf->fdir.fdir_vsi) {
2150                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2151                                           I40E_ITR_INDEX_NONE);
2152                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2153         }
2154
2155         /* Enable all queues which have been configured */
2156         ret = i40e_dev_switch_queues(pf, TRUE);
2157         if (ret != I40E_SUCCESS) {
2158                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2159                 goto err_up;
2160         }
2161
2162         /* Enable receiving broadcast packets */
2163         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2164         if (ret != I40E_SUCCESS)
2165                 PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
2166
2167         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2168                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2169                                                 true, NULL);
2170                 if (ret != I40E_SUCCESS)
2171                         PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
2172         }
2173
2174         /* Enable the VLAN promiscuous mode. */
2175         if (pf->vfs) {
2176                 for (i = 0; i < pf->vf_num; i++) {
2177                         vsi = pf->vfs[i].vsi;
2178                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2179                                                      true, NULL);
2180                 }
2181         }
2182
2183         /* Enable mac loopback mode */
2184         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2185             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2186                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2187                 if (ret != I40E_SUCCESS) {
2188                         PMD_DRV_LOG(ERR, "failed to set loopback mode");
2189                         goto err_up;
2190                 }
2191         }
2192
2193         /* Apply link configure */
2194         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2195                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2196                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2197                                 ETH_LINK_SPEED_40G)) {
2198                 PMD_DRV_LOG(ERR, "Invalid link setting");
2199                 goto err_up;
2200         }
2201         ret = i40e_apply_link_speed(dev);
2202         if (I40E_SUCCESS != ret) {
2203                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
2204                 goto err_up;
2205         }
2206
2207         if (!rte_intr_allow_others(intr_handle)) {
2208                 rte_intr_callback_unregister(intr_handle,
2209                                              i40e_dev_interrupt_handler,
2210                                              (void *)dev);
2211                 /* configure and enable device interrupt */
2212                 i40e_pf_config_irq0(hw, FALSE);
2213                 i40e_pf_enable_irq0(hw);
2214
2215                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2216                         PMD_INIT_LOG(INFO,
2217                                 "LSC won't be enabled: no interrupt multiplexing");
2218         } else {
2219                 ret = i40e_aq_set_phy_int_mask(hw,
2220                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2221                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2222                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2223                 if (ret != I40E_SUCCESS)
2224                         PMD_DRV_LOG(WARNING, "Failed to set PHY mask");
2225
2226                 /* Call the get_link_info AQ command to enable/disable LSE */
2227                 i40e_dev_link_update(dev, 0);
2228         }
2229
2230         /* enable uio intr after callback register */
2231         rte_intr_enable(intr_handle);
2232
2233         i40e_filter_restore(pf);
2234
2235         if (pf->tm_conf.root && !pf->tm_conf.committed)
2236                 PMD_DRV_LOG(WARNING,
2237                             "please call hierarchy_commit() "
2238                             "before starting the port");
2239
2240         return I40E_SUCCESS;
2241
2242 err_up:
2243         i40e_dev_switch_queues(pf, FALSE);
2244         i40e_dev_clear_queues(dev);
2245
2246         return ret;
2247 }
2248
2249 static void
2250 i40e_dev_stop(struct rte_eth_dev *dev)
2251 {
2252         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2253         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2254         struct i40e_vsi *main_vsi = pf->main_vsi;
2255         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2256         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2257         int i;
2258
2259         if (hw->adapter_stopped == 1)
2260                 return;
2261         /* Disable all queues */
2262         i40e_dev_switch_queues(pf, FALSE);
2263
2264         /* un-map queues with interrupt registers */
2265         i40e_vsi_disable_queues_intr(main_vsi);
2266         i40e_vsi_queues_unbind_intr(main_vsi);
2267
2268         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2269                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2270                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2271         }
2272
2273         if (pf->fdir.fdir_vsi) {
2274                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2275                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2276         }
2277         /* Clear all queues and release memory */
2278         i40e_dev_clear_queues(dev);
2279
2280         /* Set link down */
2281         i40e_dev_set_link_down(dev);
2282
2283         if (!rte_intr_allow_others(intr_handle))
2284                 /* resume to the default handler */
2285                 rte_intr_callback_register(intr_handle,
2286                                            i40e_dev_interrupt_handler,
2287                                            (void *)dev);
2288
2289         /* Clean datapath event and queue/vec mapping */
2290         rte_intr_efd_disable(intr_handle);
2291         if (intr_handle->intr_vec) {
2292                 rte_free(intr_handle->intr_vec);
2293                 intr_handle->intr_vec = NULL;
2294         }
2295
2296         /* reset hierarchy commit */
2297         pf->tm_conf.committed = false;
2298
2299         hw->adapter_stopped = 1;
2300 }
2301
2302 static void
2303 i40e_dev_close(struct rte_eth_dev *dev)
2304 {
2305         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2306         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2307         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2308         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2309         struct i40e_mirror_rule *p_mirror;
2310         uint32_t reg;
2311         int i;
2312         int ret;
2313
2314         PMD_INIT_FUNC_TRACE();
2315
2316         i40e_dev_stop(dev);
2317
2318         /* Remove all mirror rules */
2319         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2320                 ret = i40e_aq_del_mirror_rule(hw,
2321                                               pf->main_vsi->veb->seid,
2322                                               p_mirror->rule_type,
2323                                               p_mirror->entries,
2324                                               p_mirror->num_entries,
2325                                               p_mirror->id);
2326                 if (ret < 0)
2327                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2328                                     "status = %d, aq_err = %d.", ret,
2329                                     hw->aq.asq_last_status);
2330
2331                 /* remove mirror software resource anyway */
2332                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2333                 rte_free(p_mirror);
2334                 pf->nb_mirror_rule--;
2335         }
2336
2337         i40e_dev_free_queues(dev);
2338
2339         /* Disable interrupt */
2340         i40e_pf_disable_irq0(hw);
2341         rte_intr_disable(intr_handle);
2342
2343         /* shutdown and destroy the HMC */
2344         i40e_shutdown_lan_hmc(hw);
2345
2346         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2347                 i40e_vsi_release(pf->vmdq[i].vsi);
2348                 pf->vmdq[i].vsi = NULL;
2349         }
2350         rte_free(pf->vmdq);
2351         pf->vmdq = NULL;
2352
2353         /* release all the existing VSIs and VEBs */
2354         i40e_fdir_teardown(pf);
2355         i40e_vsi_release(pf->main_vsi);
2356
2357         /* shutdown the adminq */
2358         i40e_aq_queue_shutdown(hw, true);
2359         i40e_shutdown_adminq(hw);
2360
2361         i40e_res_pool_destroy(&pf->qp_pool);
2362         i40e_res_pool_destroy(&pf->msix_pool);
2363
2364         /* Disable flexible payload in global configuration */
2365         if (!pf->support_multi_driver)
2366                 i40e_flex_payload_reg_set_default(hw);
2367
2368         /* force a PF reset to clean anything leftover */
2369         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2370         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2371                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2372         I40E_WRITE_FLUSH(hw);
2373 }
2374
2375 /*
2376  * Reset PF device only to re-initialize resources in PMD layer
2377  */
2378 static int
2379 i40e_dev_reset(struct rte_eth_dev *dev)
2380 {
2381         int ret;
2382
2383         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2384          * all its VFs to make them align with it. The detailed notification
2385          * mechanism is PMD specific. For the i40e PF, it is rather complex, so
2386          * to avoid unexpected behavior in VFs, reset of a PF with SR-IOV
2387          * activated is currently not supported. It might be supported later.
2388          */
2389         if (dev->data->sriov.active)
2390                 return -ENOTSUP;
2391
2392         ret = eth_i40e_dev_uninit(dev);
2393         if (ret)
2394                 return ret;
2395
2396         ret = eth_i40e_dev_init(dev, NULL);
2397
2398         return ret;
2399 }
2400
2401 static void
2402 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2403 {
2404         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2405         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2406         struct i40e_vsi *vsi = pf->main_vsi;
2407         int status;
2408
2409         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2410                                                      true, NULL, true);
2411         if (status != I40E_SUCCESS)
2412                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2413
2414         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2415                                                         TRUE, NULL);
2416         if (status != I40E_SUCCESS)
2417                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2418
2419 }
2420
2421 static void
2422 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2423 {
2424         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2425         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2426         struct i40e_vsi *vsi = pf->main_vsi;
2427         int status;
2428
2429         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2430                                                      false, NULL, true);
2431         if (status != I40E_SUCCESS)
2432                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2433
2434         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2435                                                         false, NULL);
2436         if (status != I40E_SUCCESS)
2437                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2438 }
2439
2440 static void
2441 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2442 {
2443         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2444         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2445         struct i40e_vsi *vsi = pf->main_vsi;
2446         int ret;
2447
2448         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2449         if (ret != I40E_SUCCESS)
2450                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2451 }
2452
2453 static void
2454 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2455 {
2456         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2457         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2458         struct i40e_vsi *vsi = pf->main_vsi;
2459         int ret;
2460
2461         if (dev->data->promiscuous == 1)
2462                 return; /* must remain in all_multicast mode */
2463
2464         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2465                                 vsi->seid, FALSE, NULL);
2466         if (ret != I40E_SUCCESS)
2467                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2468 }
2469
2470 /*
2471  * Set device link up.
2472  */
2473 static int
2474 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2475 {
2476         /* re-apply link speed setting */
2477         return i40e_apply_link_speed(dev);
2478 }
2479
2480 /*
2481  * Set device link down.
2482  */
2483 static int
2484 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2485 {
2486         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2487         uint8_t abilities = 0;
2488         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2489
2490         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2491         return i40e_phy_conf_link(hw, abilities, speed, false);
2492 }
2493
2494 static __rte_always_inline void
2495 update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
2496 {
2497 /* Link status registers and values */
2498 #define I40E_PRTMAC_LINKSTA             0x001E2420
2499 #define I40E_REG_LINK_UP                0x40000080
2500 #define I40E_PRTMAC_MACC                0x001E24E0
2501 #define I40E_REG_MACC_25GB              0x00020000
2502 #define I40E_REG_SPEED_MASK             0x38000000
2503 #define I40E_REG_SPEED_100MB            0x00000000
2504 #define I40E_REG_SPEED_1GB              0x08000000
2505 #define I40E_REG_SPEED_10GB             0x10000000
2506 #define I40E_REG_SPEED_20GB             0x20000000
2507 #define I40E_REG_SPEED_25_40GB          0x18000000
2508         uint32_t link_speed;
2509         uint32_t reg_val;
2510
2511         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2512         link_speed = reg_val & I40E_REG_SPEED_MASK;
2513         reg_val &= I40E_REG_LINK_UP;
2514         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
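        /* I40E_REG_LINK_UP covers two bits (0x40000000 | 0x00000080); both
         * must survive the mask for the link to be reported as up.
         */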
2515
2516         if (unlikely(link->link_status == 0))
2517                 return;
2518
2519         /* Parse the link status */
2520         switch (link_speed) {
2521         case I40E_REG_SPEED_100MB:
2522                 link->link_speed = ETH_SPEED_NUM_100M;
2523                 break;
2524         case I40E_REG_SPEED_1GB:
2525                 link->link_speed = ETH_SPEED_NUM_1G;
2526                 break;
2527         case I40E_REG_SPEED_10GB:
2528                 link->link_speed = ETH_SPEED_NUM_10G;
2529                 break;
2530         case I40E_REG_SPEED_20GB:
2531                 link->link_speed = ETH_SPEED_NUM_20G;
2532                 break;
2533         case I40E_REG_SPEED_25_40GB:
2534                 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2535
2536                 if (reg_val & I40E_REG_MACC_25GB)
2537                         link->link_speed = ETH_SPEED_NUM_25G;
2538                 else
2539                         link->link_speed = ETH_SPEED_NUM_40G;
2540
2541                 break;
2542         default:
2543                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2544                 break;
2545         }
2546 }
2547
2548 static __rte_always_inline void
2549 update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
2550         bool enable_lse)
2551 {
2552 #define CHECK_INTERVAL             100  /* 100ms */
2553 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2554         uint32_t rep_cnt = MAX_REPEAT_TIME;
2555         struct i40e_link_status link_status;
2556         int status;
2557
2560         do {
2561                 memset(&link_status, 0, sizeof(link_status));
2562
2563                 /* Get link status information from hardware */
2564                 status = i40e_aq_get_link_info(hw, enable_lse,
2565                                                 &link_status, NULL);
2566                 if (unlikely(status != I40E_SUCCESS)) {
2567                         link->link_speed = ETH_SPEED_NUM_100M;
2568                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2569                         PMD_DRV_LOG(ERR, "Failed to get link info");
2570                         return;
2571                 }
2572
2573                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2574                 if (unlikely(link->link_status != 0))
2575                         break;
2576
2577                 rte_delay_ms(CHECK_INTERVAL);
2578         } while (--rep_cnt);
2579
2580         /* Parse the link status */
2581         switch (link_status.link_speed) {
2582         case I40E_LINK_SPEED_100MB:
2583                 link->link_speed = ETH_SPEED_NUM_100M;
2584                 break;
2585         case I40E_LINK_SPEED_1GB:
2586                 link->link_speed = ETH_SPEED_NUM_1G;
2587                 break;
2588         case I40E_LINK_SPEED_10GB:
2589                 link->link_speed = ETH_SPEED_NUM_10G;
2590                 break;
2591         case I40E_LINK_SPEED_20GB:
2592                 link->link_speed = ETH_SPEED_NUM_20G;
2593                 break;
2594         case I40E_LINK_SPEED_25GB:
2595                 link->link_speed = ETH_SPEED_NUM_25G;
2596                 break;
2597         case I40E_LINK_SPEED_40GB:
2598                 link->link_speed = ETH_SPEED_NUM_40G;
2599                 break;
2600         default:
2601                 link->link_speed = ETH_SPEED_NUM_100M;
2602                 break;
2603         }
2604 }
2605
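/* ethdev .link_update callback: rte_eth_link_get() lands here with
 * wait_to_complete = 1, rte_eth_link_get_nowait() with 0.
 */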
2606 int
2607 i40e_dev_link_update(struct rte_eth_dev *dev,
2608                      int wait_to_complete)
2609 {
2610         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2611         struct rte_eth_link link;
2612         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2613         int ret;
2614
2615         memset(&link, 0, sizeof(link));
2616
2617         /* i40e uses full duplex only */
2618         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2619         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2620                         ETH_LINK_SPEED_FIXED);
2621
2622         if (!wait_to_complete)
2623                 update_link_no_wait(hw, &link);
2624         else
2625                 update_link_wait(hw, &link, enable_lse);
2626
2627         ret = rte_eth_linkstatus_set(dev, &link);
2628         i40e_notify_all_vfs_link_status(dev);
2629
2630         return ret;
2631 }
2632
2633 /* Get all the statistics of a VSI */
2634 void
2635 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2636 {
2637         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2638         struct i40e_eth_stats *nes = &vsi->eth_stats;
2639         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2640         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2641
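        /* The GLV_* counters are free-running in hardware;
         * i40e_stat_update_48()/_32() compute the delta (with wrap
         * handling) against the offset snapshot captured when
         * offset_loaded was last cleared, i.e. at init or stats reset.
         */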
2642         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2643                             vsi->offset_loaded, &oes->rx_bytes,
2644                             &nes->rx_bytes);
2645         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2646                             vsi->offset_loaded, &oes->rx_unicast,
2647                             &nes->rx_unicast);
2648         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2649                             vsi->offset_loaded, &oes->rx_multicast,
2650                             &nes->rx_multicast);
2651         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2652                             vsi->offset_loaded, &oes->rx_broadcast,
2653                             &nes->rx_broadcast);
2654         /* exclude CRC bytes */
2655         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2656                 nes->rx_broadcast) * ETHER_CRC_LEN;
2657
2658         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2659                             &oes->rx_discards, &nes->rx_discards);
2660         /* GLV_REPC not supported */
2661         /* GLV_RMPC not supported */
2662         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2663                             &oes->rx_unknown_protocol,
2664                             &nes->rx_unknown_protocol);
2665         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2666                             vsi->offset_loaded, &oes->tx_bytes,
2667                             &nes->tx_bytes);
2668         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2669                             vsi->offset_loaded, &oes->tx_unicast,
2670                             &nes->tx_unicast);
2671         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2672                             vsi->offset_loaded, &oes->tx_multicast,
2673                             &nes->tx_multicast);
2674         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2675                             vsi->offset_loaded,  &oes->tx_broadcast,
2676                             &nes->tx_broadcast);
2677         /* GLV_TDPC not supported */
2678         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2679                             &oes->tx_errors, &nes->tx_errors);
2680         vsi->offset_loaded = true;
2681
2682         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2683                     vsi->vsi_id);
2684         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2685         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2686         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2687         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2688         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2689         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2690                     nes->rx_unknown_protocol);
2691         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2692         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2693         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2694         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2695         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2696         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2697         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2698                     vsi->vsi_id);
2699 }
2700
2701 static void
2702 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2703 {
2704         unsigned int i;
2705         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2706         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2707
2708         /* Get rx/tx bytes of internally transferred packets */
2709         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2710                         I40E_GLV_GORCL(hw->port),
2711                         pf->offset_loaded,
2712                         &pf->internal_stats_offset.rx_bytes,
2713                         &pf->internal_stats.rx_bytes);
2714
2715         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2716                         I40E_GLV_GOTCL(hw->port),
2717                         pf->offset_loaded,
2718                         &pf->internal_stats_offset.tx_bytes,
2719                         &pf->internal_stats.tx_bytes);
2720         /* Get total internal rx packet count */
2721         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2722                             I40E_GLV_UPRCL(hw->port),
2723                             pf->offset_loaded,
2724                             &pf->internal_stats_offset.rx_unicast,
2725                             &pf->internal_stats.rx_unicast);
2726         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2727                             I40E_GLV_MPRCL(hw->port),
2728                             pf->offset_loaded,
2729                             &pf->internal_stats_offset.rx_multicast,
2730                             &pf->internal_stats.rx_multicast);
2731         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2732                             I40E_GLV_BPRCL(hw->port),
2733                             pf->offset_loaded,
2734                             &pf->internal_stats_offset.rx_broadcast,
2735                             &pf->internal_stats.rx_broadcast);
2736         /* Get total internal tx packet count */
2737         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2738                             I40E_GLV_UPTCL(hw->port),
2739                             pf->offset_loaded,
2740                             &pf->internal_stats_offset.tx_unicast,
2741                             &pf->internal_stats.tx_unicast);
2742         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2743                             I40E_GLV_MPTCL(hw->port),
2744                             pf->offset_loaded,
2745                             &pf->internal_stats_offset.tx_multicast,
2746                             &pf->internal_stats.tx_multicast);
2747         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2748                             I40E_GLV_BPTCL(hw->port),
2749                             pf->offset_loaded,
2750                             &pf->internal_stats_offset.tx_broadcast,
2751                             &pf->internal_stats.tx_broadcast);
2752
2753         /* exclude CRC size */
2754         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2755                 pf->internal_stats.rx_multicast +
2756                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2757
2758         /* Get statistics of struct i40e_eth_stats */
2759         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2760                             I40E_GLPRT_GORCL(hw->port),
2761                             pf->offset_loaded, &os->eth.rx_bytes,
2762                             &ns->eth.rx_bytes);
2763         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2764                             I40E_GLPRT_UPRCL(hw->port),
2765                             pf->offset_loaded, &os->eth.rx_unicast,
2766                             &ns->eth.rx_unicast);
2767         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2768                             I40E_GLPRT_MPRCL(hw->port),
2769                             pf->offset_loaded, &os->eth.rx_multicast,
2770                             &ns->eth.rx_multicast);
2771         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2772                             I40E_GLPRT_BPRCL(hw->port),
2773                             pf->offset_loaded, &os->eth.rx_broadcast,
2774                             &ns->eth.rx_broadcast);
2775         /* Workaround: CRC size should not be included in byte statistics,
2776          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2777          */
2778         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2779                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2780
2781         /* Exclude internal rx bytes.
2782          * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated before
2783          * I40E_GLPRT_GORC[H/L], so there is a small window that causes a
2784          * negative value.
2785          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2786          */
2787         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2788                 ns->eth.rx_bytes = 0;
2789         else
2790                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2791
2792         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2793                 ns->eth.rx_unicast = 0;
2794         else
2795                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2796
2797         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2798                 ns->eth.rx_multicast = 0;
2799         else
2800                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2801
2802         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2803                 ns->eth.rx_broadcast = 0;
2804         else
2805                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2806
2807         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2808                             pf->offset_loaded, &os->eth.rx_discards,
2809                             &ns->eth.rx_discards);
2810         /* GLPRT_REPC not supported */
2811         /* GLPRT_RMPC not supported */
2812         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2813                             pf->offset_loaded,
2814                             &os->eth.rx_unknown_protocol,
2815                             &ns->eth.rx_unknown_protocol);
2816         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2817                             I40E_GLPRT_GOTCL(hw->port),
2818                             pf->offset_loaded, &os->eth.tx_bytes,
2819                             &ns->eth.tx_bytes);
2820         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2821                             I40E_GLPRT_UPTCL(hw->port),
2822                             pf->offset_loaded, &os->eth.tx_unicast,
2823                             &ns->eth.tx_unicast);
2824         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2825                             I40E_GLPRT_MPTCL(hw->port),
2826                             pf->offset_loaded, &os->eth.tx_multicast,
2827                             &ns->eth.tx_multicast);
2828         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2829                             I40E_GLPRT_BPTCL(hw->port),
2830                             pf->offset_loaded, &os->eth.tx_broadcast,
2831                             &ns->eth.tx_broadcast);
2832         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2833                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2834
2835         /* Exclude internal tx bytes.
2836          * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated before
2837          * I40E_GLPRT_GOTC[H/L], so there is a small window that causes a
2838          * negative value.
2839          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2840          */
2841         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2842                 ns->eth.tx_bytes = 0;
2843         else
2844                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2845
2846         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2847                 ns->eth.tx_unicast = 0;
2848         else
2849                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2850
2851         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2852                 ns->eth.tx_multicast = 0;
2853         else
2854                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2855
2856         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2857                 ns->eth.tx_broadcast = 0;
2858         else
2859                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2860
2861         /* GLPRT_TEPC not supported */
2862
2863         /* additional port specific stats */
2864         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2865                             pf->offset_loaded, &os->tx_dropped_link_down,
2866                             &ns->tx_dropped_link_down);
2867         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2868                             pf->offset_loaded, &os->crc_errors,
2869                             &ns->crc_errors);
2870         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2871                             pf->offset_loaded, &os->illegal_bytes,
2872                             &ns->illegal_bytes);
2873         /* GLPRT_ERRBC not supported */
2874         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2875                             pf->offset_loaded, &os->mac_local_faults,
2876                             &ns->mac_local_faults);
2877         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2878                             pf->offset_loaded, &os->mac_remote_faults,
2879                             &ns->mac_remote_faults);
2880         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2881                             pf->offset_loaded, &os->rx_length_errors,
2882                             &ns->rx_length_errors);
2883         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2884                             pf->offset_loaded, &os->link_xon_rx,
2885                             &ns->link_xon_rx);
2886         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2887                             pf->offset_loaded, &os->link_xoff_rx,
2888                             &ns->link_xoff_rx);
2889         for (i = 0; i < 8; i++) {
2890                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2891                                     pf->offset_loaded,
2892                                     &os->priority_xon_rx[i],
2893                                     &ns->priority_xon_rx[i]);
2894                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2895                                     pf->offset_loaded,
2896                                     &os->priority_xoff_rx[i],
2897                                     &ns->priority_xoff_rx[i]);
2898         }
2899         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2900                             pf->offset_loaded, &os->link_xon_tx,
2901                             &ns->link_xon_tx);
2902         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2903                             pf->offset_loaded, &os->link_xoff_tx,
2904                             &ns->link_xoff_tx);
2905         for (i = 0; i < 8; i++) {
2906                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2907                                     pf->offset_loaded,
2908                                     &os->priority_xon_tx[i],
2909                                     &ns->priority_xon_tx[i]);
2910                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2911                                     pf->offset_loaded,
2912                                     &os->priority_xoff_tx[i],
2913                                     &ns->priority_xoff_tx[i]);
2914                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2915                                     pf->offset_loaded,
2916                                     &os->priority_xon_2_xoff[i],
2917                                     &ns->priority_xon_2_xoff[i]);
2918         }
2919         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2920                             I40E_GLPRT_PRC64L(hw->port),
2921                             pf->offset_loaded, &os->rx_size_64,
2922                             &ns->rx_size_64);
2923         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2924                             I40E_GLPRT_PRC127L(hw->port),
2925                             pf->offset_loaded, &os->rx_size_127,
2926                             &ns->rx_size_127);
2927         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2928                             I40E_GLPRT_PRC255L(hw->port),
2929                             pf->offset_loaded, &os->rx_size_255,
2930                             &ns->rx_size_255);
2931         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2932                             I40E_GLPRT_PRC511L(hw->port),
2933                             pf->offset_loaded, &os->rx_size_511,
2934                             &ns->rx_size_511);
2935         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2936                             I40E_GLPRT_PRC1023L(hw->port),
2937                             pf->offset_loaded, &os->rx_size_1023,
2938                             &ns->rx_size_1023);
2939         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2940                             I40E_GLPRT_PRC1522L(hw->port),
2941                             pf->offset_loaded, &os->rx_size_1522,
2942                             &ns->rx_size_1522);
2943         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2944                             I40E_GLPRT_PRC9522L(hw->port),
2945                             pf->offset_loaded, &os->rx_size_big,
2946                             &ns->rx_size_big);
2947         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2948                             pf->offset_loaded, &os->rx_undersize,
2949                             &ns->rx_undersize);
2950         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2951                             pf->offset_loaded, &os->rx_fragments,
2952                             &ns->rx_fragments);
2953         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2954                             pf->offset_loaded, &os->rx_oversize,
2955                             &ns->rx_oversize);
2956         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2957                             pf->offset_loaded, &os->rx_jabber,
2958                             &ns->rx_jabber);
2959         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2960                             I40E_GLPRT_PTC64L(hw->port),
2961                             pf->offset_loaded, &os->tx_size_64,
2962                             &ns->tx_size_64);
2963         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2964                             I40E_GLPRT_PTC127L(hw->port),
2965                             pf->offset_loaded, &os->tx_size_127,
2966                             &ns->tx_size_127);
2967         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2968                             I40E_GLPRT_PTC255L(hw->port),
2969                             pf->offset_loaded, &os->tx_size_255,
2970                             &ns->tx_size_255);
2971         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2972                             I40E_GLPRT_PTC511L(hw->port),
2973                             pf->offset_loaded, &os->tx_size_511,
2974                             &ns->tx_size_511);
2975         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2976                             I40E_GLPRT_PTC1023L(hw->port),
2977                             pf->offset_loaded, &os->tx_size_1023,
2978                             &ns->tx_size_1023);
2979         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2980                             I40E_GLPRT_PTC1522L(hw->port),
2981                             pf->offset_loaded, &os->tx_size_1522,
2982                             &ns->tx_size_1522);
2983         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2984                             I40E_GLPRT_PTC9522L(hw->port),
2985                             pf->offset_loaded, &os->tx_size_big,
2986                             &ns->tx_size_big);
2987         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2988                            pf->offset_loaded,
2989                            &os->fd_sb_match, &ns->fd_sb_match);
2990         /* GLPRT_MSPDC not supported */
2991         /* GLPRT_XEC not supported */
2992
2993         pf->offset_loaded = true;
2994
2995         if (pf->main_vsi)
2996                 i40e_update_vsi_stats(pf->main_vsi);
2997 }
2998
2999 /* Get all statistics of a port */
3000 static int
3001 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3002 {
3003         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3004         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3005         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3006         unsigned i;
3007
3008         /* Read the stats registers to refresh the values, then fill the stats struct */
3009         i40e_read_stats_registers(pf, hw);
3010
3011         stats->ipackets = ns->eth.rx_unicast +
3012                         ns->eth.rx_multicast +
3013                         ns->eth.rx_broadcast -
3014                         ns->eth.rx_discards -
3015                         pf->main_vsi->eth_stats.rx_discards;
3016         stats->opackets = ns->eth.tx_unicast +
3017                         ns->eth.tx_multicast +
3018                         ns->eth.tx_broadcast;
3019         stats->ibytes   = ns->eth.rx_bytes;
3020         stats->obytes   = ns->eth.tx_bytes;
3021         stats->oerrors  = ns->eth.tx_errors +
3022                         pf->main_vsi->eth_stats.tx_errors;
3023
3024         /* Rx Errors */
3025         stats->imissed  = ns->eth.rx_discards +
3026                         pf->main_vsi->eth_stats.rx_discards;
3027         stats->ierrors  = ns->crc_errors +
3028                         ns->rx_length_errors + ns->rx_undersize +
3029                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3030
3031         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3032         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3033         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3034         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3035         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3036         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3037         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3038                     ns->eth.rx_unknown_protocol);
3039         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3040         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3041         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3042         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3043         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3044         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3045
3046         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3047                     ns->tx_dropped_link_down);
3048         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3049         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3050                     ns->illegal_bytes);
3051         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3052         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3053                     ns->mac_local_faults);
3054         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3055                     ns->mac_remote_faults);
3056         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3057                     ns->rx_length_errors);
3058         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3059         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3060         for (i = 0; i < 8; i++) {
3061                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3062                                 i, ns->priority_xon_rx[i]);
3063                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3064                                 i, ns->priority_xoff_rx[i]);
3065         }
3066         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3067         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3068         for (i = 0; i < 8; i++) {
3069                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3070                                 i, ns->priority_xon_tx[i]);
3071                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3072                                 i, ns->priority_xoff_tx[i]);
3073                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3074                                 i, ns->priority_xon_2_xoff[i]);
3075         }
3076         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3077         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3078         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3079         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3080         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3081         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3082         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3083         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3084         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3085         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3086         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3087         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3088         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3089         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3090         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3091         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3092         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3093         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3094         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3095                         ns->mac_short_packet_dropped);
3096         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3097                     ns->checksum_error);
3098         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3099         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3100         return 0;
3101 }
3102
3103 /* Reset the statistics */
3104 static void
3105 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3106 {
3107         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3108         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3109
3110         /* Mark PF and VSI stats to update the offset, aka "reset" */
3111         pf->offset_loaded = false;
3112         if (pf->main_vsi)
3113                 pf->main_vsi->offset_loaded = false;
3114
3115         /* read the stats, reading current register values into offset */
3116         i40e_read_stats_registers(pf, hw);
3117 }
3118
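/* Total number of extended stats: the basic eth stats and the
 * port-level stats, plus one entry per priority (8 TCs) for each Rx
 * and Tx priority statistic.
 */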
3119 static uint32_t
3120 i40e_xstats_calc_num(void)
3121 {
3122         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3123                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3124                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3125 }
3126
3127 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3128                                      struct rte_eth_xstat_name *xstats_names,
3129                                      __rte_unused unsigned limit)
3130 {
3131         unsigned count = 0;
3132         unsigned i, prio;
3133
3134         if (xstats_names == NULL)
3135                 return i40e_xstats_calc_num();
3136
3137         /* Note: limit checked in rte_eth_xstats_get_names() */
3138
3139         /* Get stats from i40e_eth_stats struct */
3140         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3141                 snprintf(xstats_names[count].name,
3142                          sizeof(xstats_names[count].name),
3143                          "%s", rte_i40e_stats_strings[i].name);
3144                 count++;
3145         }
3146
3147         /* Get individual stats from i40e_hw_port struct */
3148         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3149                 snprintf(xstats_names[count].name,
3150                         sizeof(xstats_names[count].name),
3151                          "%s", rte_i40e_hw_port_strings[i].name);
3152                 count++;
3153         }
3154
3155         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3156                 for (prio = 0; prio < 8; prio++) {
3157                         snprintf(xstats_names[count].name,
3158                                  sizeof(xstats_names[count].name),
3159                                  "rx_priority%u_%s", prio,
3160                                  rte_i40e_rxq_prio_strings[i].name);
3161                         count++;
3162                 }
3163         }
3164
3165         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3166                 for (prio = 0; prio < 8; prio++) {
3167                         snprintf(xstats_names[count].name,
3168                                  sizeof(xstats_names[count].name),
3169                                  "tx_priority%u_%s", prio,
3170                                  rte_i40e_txq_prio_strings[i].name);
3171                         count++;
3172                 }
3173         }
3174         return count;
3175 }
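/* A minimal application-side sketch of how these names are retrieved,
 * assuming a valid port_id and omitting error handling:
 *
 *     int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *     struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *     rte_eth_xstats_get_names(port_id, names, n);
 */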
3176
3177 static int
3178 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3179                     unsigned n)
3180 {
3181         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3182         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3183         unsigned i, count, prio;
3184         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3185
3186         count = i40e_xstats_calc_num();
3187         if (n < count)
3188                 return count;
3189
3190         i40e_read_stats_registers(pf, hw);
3191
3192         if (xstats == NULL)
3193                 return 0;
3194
3195         count = 0;
3196
3197         /* Get stats from i40e_eth_stats struct */
3198         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3199                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3200                         rte_i40e_stats_strings[i].offset);
3201                 xstats[count].id = count;
3202                 count++;
3203         }
3204
3205         /* Get individual stats from i40e_hw_port struct */
3206         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3207                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3208                         rte_i40e_hw_port_strings[i].offset);
3209                 xstats[count].id = count;
3210                 count++;
3211         }
3212
3213         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3214                 for (prio = 0; prio < 8; prio++) {
3215                         xstats[count].value =
3216                                 *(uint64_t *)(((char *)hw_stats) +
3217                                 rte_i40e_rxq_prio_strings[i].offset +
3218                                 (sizeof(uint64_t) * prio));
3219                         xstats[count].id = count;
3220                         count++;
3221                 }
3222         }
3223
3224         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3225                 for (prio = 0; prio < 8; prio++) {
3226                         xstats[count].value =
3227                                 *(uint64_t *)(((char *)hw_stats) +
3228                                 rte_i40e_txq_prio_strings[i].offset +
3229                                 (sizeof(uint64_t) * prio));
3230                         xstats[count].id = count;
3231                         count++;
3232                 }
3233         }
3234
3235         return count;
3236 }
3237
3238 static int
3239 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3240                                  __rte_unused uint16_t queue_id,
3241                                  __rte_unused uint8_t stat_idx,
3242                                  __rte_unused uint8_t is_rx)
3243 {
3244         PMD_INIT_FUNC_TRACE();
3245
3246         return -ENOSYS;
3247 }
3248
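/* Firmware version string format is
 * "<nvm major>.<nvm minor><nvm rev> 0x<eetrack> <oem ver>.<oem build>.<oem patch>",
 * where hw->nvm.oem_ver packs the OEM fields as ver[31:24],
 * build[23:8] and patch[7:0].
 */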
3249 static int
3250 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3251 {
3252         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3253         u32 full_ver;
3254         u8 ver, patch;
3255         u16 build;
3256         int ret;
3257
3258         full_ver = hw->nvm.oem_ver;
3259         ver = (u8)(full_ver >> 24);
3260         build = (u16)((full_ver >> 8) & 0xffff);
3261         patch = (u8)(full_ver & 0xff);
3262
3263         ret = snprintf(fw_version, fw_size,
3264                  "%d.%d%d 0x%08x %d.%d.%d",
3265                  ((hw->nvm.version >> 12) & 0xf),
3266                  ((hw->nvm.version >> 4) & 0xff),
3267                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3268                  ver, build, patch);
3269
3270         ret += 1; /* add the size of '\0' */
3271         if (fw_size < (u32)ret)
3272                 return ret;
3273         else
3274                 return 0;
3275 }
3276
3277 static void
3278 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3279 {
3280         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3281         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3282         struct i40e_vsi *vsi = pf->main_vsi;
3283         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3284
3285         dev_info->max_rx_queues = vsi->nb_qps;
3286         dev_info->max_tx_queues = vsi->nb_qps;
3287         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3288         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3289         dev_info->max_mac_addrs = vsi->max_macaddrs;
3290         dev_info->max_vfs = pci_dev->max_vfs;
3291         dev_info->rx_queue_offload_capa = 0;
3292         dev_info->rx_offload_capa =
3293                 DEV_RX_OFFLOAD_VLAN_STRIP |
3294                 DEV_RX_OFFLOAD_QINQ_STRIP |
3295                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3296                 DEV_RX_OFFLOAD_UDP_CKSUM |
3297                 DEV_RX_OFFLOAD_TCP_CKSUM |
3298                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3299                 DEV_RX_OFFLOAD_CRC_STRIP |
3300                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3301                 DEV_RX_OFFLOAD_VLAN_FILTER |
3302                 DEV_RX_OFFLOAD_JUMBO_FRAME;
3303
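        /* Advertise mbuf fast free as a per-queue Tx offload. It is
         * also folded into tx_offload_capa below, since ethdev requires
         * the queue offload capabilities to be a subset of the port
         * capabilities.
         */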
3304         dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3305         dev_info->tx_offload_capa =
3306                 DEV_TX_OFFLOAD_VLAN_INSERT |
3307                 DEV_TX_OFFLOAD_QINQ_INSERT |
3308                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3309                 DEV_TX_OFFLOAD_UDP_CKSUM |
3310                 DEV_TX_OFFLOAD_TCP_CKSUM |
3311                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3312                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3313                 DEV_TX_OFFLOAD_TCP_TSO |
3314                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3315                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3316                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3317                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3318                 DEV_TX_OFFLOAD_MULTI_SEGS |
3319                 dev_info->tx_queue_offload_capa;
3320         dev_info->dev_capa =
3321                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3322                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3323
3324         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3325                                                 sizeof(uint32_t);
3326         dev_info->reta_size = pf->hash_lut_size;
3327         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3328
3329         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3330                 .rx_thresh = {
3331                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3332                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3333                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3334                 },
3335                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3336                 .rx_drop_en = 0,
3337                 .offloads = 0,
3338         };
3339
3340         dev_info->default_txconf = (struct rte_eth_txconf) {
3341                 .tx_thresh = {
3342                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3343                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3344                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3345                 },
3346                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3347                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3348                 .offloads = 0,
3349         };
3350
3351         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3352                 .nb_max = I40E_MAX_RING_DESC,
3353                 .nb_min = I40E_MIN_RING_DESC,
3354                 .nb_align = I40E_ALIGN_RING_DESC,
3355         };
3356
3357         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3358                 .nb_max = I40E_MAX_RING_DESC,
3359                 .nb_min = I40E_MIN_RING_DESC,
3360                 .nb_align = I40E_ALIGN_RING_DESC,
3361                 .nb_seg_max = I40E_TX_MAX_SEG,
3362                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3363         };
3364
3365         if (pf->flags & I40E_FLAG_VMDQ) {
3366                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3367                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3368                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3369                                                 pf->max_nb_vmdq_vsi;
3370                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3371                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3372                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3373         }
3374
3375         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3376                 /* For XL710 */
3377                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3378                 dev_info->default_rxportconf.nb_queues = 2;
3379                 dev_info->default_txportconf.nb_queues = 2;
3380                 if (dev->data->nb_rx_queues == 1)
3381                         dev_info->default_rxportconf.ring_size = 2048;
3382                 else
3383                         dev_info->default_rxportconf.ring_size = 1024;
3384                 if (dev->data->nb_tx_queues == 1)
3385                         dev_info->default_txportconf.ring_size = 1024;
3386                 else
3387                         dev_info->default_txportconf.ring_size = 512;
3388
3389         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3390                 /* For XXV710 */
3391                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3392                 dev_info->default_rxportconf.nb_queues = 1;
3393                 dev_info->default_txportconf.nb_queues = 1;
3394                 dev_info->default_rxportconf.ring_size = 256;
3395                 dev_info->default_txportconf.ring_size = 256;
3396         } else {
3397                 /* For X710 */
3398                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3399                 dev_info->default_rxportconf.nb_queues = 1;
3400                 dev_info->default_txportconf.nb_queues = 1;
3401                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3402                         dev_info->default_rxportconf.ring_size = 512;
3403                         dev_info->default_txportconf.ring_size = 256;
3404                 } else {
3405                         dev_info->default_rxportconf.ring_size = 256;
3406                         dev_info->default_txportconf.ring_size = 256;
3407                 }
3408         }
3409         dev_info->default_rxportconf.burst_size = 32;
3410         dev_info->default_txportconf.burst_size = 32;
3411 }
3412
3413 static int
3414 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3415 {
3416         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3417         struct i40e_vsi *vsi = pf->main_vsi;
3418         PMD_INIT_FUNC_TRACE();
3419
3420         if (on)
3421                 return i40e_vsi_add_vlan(vsi, vlan_id);
3422         else
3423                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3424 }
3425
3426 static int
3427 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3428                                 enum rte_vlan_type vlan_type,
3429                                 uint16_t tpid, int qinq)
3430 {
3431         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3432         uint64_t reg_r = 0;
3433         uint64_t reg_w = 0;
3434         uint16_t reg_id = 3;
3435         int ret;
3436
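        /* GL_SWT_L2TAGCTRL index selection: entry 2 holds the outer tag
         * ethertype when QinQ is enabled, entry 3 the single/inner tag
         * ethertype otherwise.
         */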
3437         if (qinq) {
3438                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3439                         reg_id = 2;
3440         }
3441
3442         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3443                                           &reg_r, NULL);
3444         if (ret != I40E_SUCCESS) {
3445                 PMD_DRV_LOG(ERR,
3446                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3447                            reg_id);
3448                 return -EIO;
3449         }
3450         PMD_DRV_LOG(DEBUG,
3451                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3452                     reg_id, reg_r);
3453
3454         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3455         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3456         if (reg_r == reg_w) {
3457                 PMD_DRV_LOG(DEBUG, "No need to write");
3458                 return 0;
3459         }
3460
3461         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3462                                            reg_w, NULL);
3463         if (ret != I40E_SUCCESS) {
3464                 PMD_DRV_LOG(ERR,
3465                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3466                             reg_id);
3467                 return -EIO;
3468         }
3469         PMD_DRV_LOG(DEBUG,
3470                     "Global register 0x%08x is changed with value 0x%08x",
3471                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3472
3473         return 0;
3474 }
3475
3476 static int
3477 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3478                    enum rte_vlan_type vlan_type,
3479                    uint16_t tpid)
3480 {
3481         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3482         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3483         int qinq = dev->data->dev_conf.rxmode.offloads &
3484                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3485         int ret = 0;
3486
3487         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3488              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3489             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3490                 PMD_DRV_LOG(ERR,
3491                             "Unsupported vlan type.");
3492                 return -EINVAL;
3493         }
3494
3495         if (pf->support_multi_driver) {
3496                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3497                 return -ENOTSUP;
3498         }
3499
3500         /* The ability to handle 802.1ad frames was added in NVM API 1.7 */
3501         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3502                 if (qinq) {
3503                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3504                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3505                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3506                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3507                 } else {
3508                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3509                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3510                 }
3511                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3512                 if (ret != I40E_SUCCESS) {
3513                         PMD_DRV_LOG(ERR,
3514                                     "Set switch config failed aq_err: %d",
3515                                     hw->aq.asq_last_status);
3516                         ret = -EIO;
3517                 }
3518         } else
3519                 /* If NVM API < 1.7, keep the register setting */
3520                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3521                                                       tpid, qinq);
3522         i40e_global_cfg_warning(I40E_WARNING_TPID);
3523
3524         return ret;
3525 }
3526
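/* ethdev .vlan_offload_set callback: the mask selects which of the
 * filter/strip/extend settings are (re)applied from the current
 * dev_conf.rxmode.offloads flags.
 */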
3527 static int
3528 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3529 {
3530         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3531         struct i40e_vsi *vsi = pf->main_vsi;
3532         struct rte_eth_rxmode *rxmode;
3533
3534         rxmode = &dev->data->dev_conf.rxmode;
3535         if (mask & ETH_VLAN_FILTER_MASK) {
3536                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3537                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3538                 else
3539                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3540         }
3541
3542         if (mask & ETH_VLAN_STRIP_MASK) {
3543                 /* Enable or disable VLAN stripping */
3544                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3545                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3546                 else
3547                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3548         }
3549
3550         if (mask & ETH_VLAN_EXTEND_MASK) {
3551                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
3552                         i40e_vsi_config_double_vlan(vsi, TRUE);
3553                         /* Set global registers with default ethertype. */
3554                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3555                                            ETHER_TYPE_VLAN);
3556                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3557                                            ETHER_TYPE_VLAN);
3558                 } else
3560                         i40e_vsi_config_double_vlan(vsi, FALSE);
3561         }
3562
3563         return 0;
3564 }
3565
3566 static void
3567 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3568                           __rte_unused uint16_t queue,
3569                           __rte_unused int on)
3570 {
3571         PMD_INIT_FUNC_TRACE();
3572 }
3573
3574 static int
3575 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3576 {
3577         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3578         struct i40e_vsi *vsi = pf->main_vsi;
3579         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3580         struct i40e_vsi_vlan_pvid_info info;
3581
3582         memset(&info, 0, sizeof(info));
3583         info.on = on;
3584         if (info.on)
3585                 info.config.pvid = pvid;
3586         else {
3587                 info.config.reject.tagged =
3588                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3589                 info.config.reject.untagged =
3590                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3591         }
3592
3593         return i40e_vsi_vlan_pvid_set(vsi, &info);
3594 }
3595
3596 static int
3597 i40e_dev_led_on(struct rte_eth_dev *dev)
3598 {
3599         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3600         uint32_t mode = i40e_led_get(hw);
3601
3602         if (mode == 0)
3603                 i40e_led_set(hw, 0xf, true); /* mode 0xf keeps the LED always on */
3604
3605         return 0;
3606 }
3607
3608 static int
3609 i40e_dev_led_off(struct rte_eth_dev *dev)
3610 {
3611         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3612         uint32_t mode = i40e_led_get(hw);
3613
3614         if (mode != 0)
3615                 i40e_led_set(hw, 0, false);
3616
3617         return 0;
3618 }
3619
3620 static int
3621 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3622 {
3623         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3624         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3625
3626         fc_conf->pause_time = pf->fc_conf.pause_time;
3627
3628         /* Read back from the registers, in case they were modified by another port */
3629         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3630                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3631         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3632                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3633
3634         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3635         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3636
3637         /* Return the current mode according to the actual setting */
3638         switch (hw->fc.current_mode) {
3639         case I40E_FC_FULL:
3640                 fc_conf->mode = RTE_FC_FULL;
3641                 break;
3642         case I40E_FC_TX_PAUSE:
3643                 fc_conf->mode = RTE_FC_TX_PAUSE;
3644                 break;
3645         case I40E_FC_RX_PAUSE:
3646                 fc_conf->mode = RTE_FC_RX_PAUSE;
3647                 break;
3648         case I40E_FC_NONE:
3649         default:
3650                 fc_conf->mode = RTE_FC_NONE;
3651         }
3652
3653         return 0;
3654 }
3655
3656 static int
3657 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3658 {
3659         uint32_t mflcn_reg, fctrl_reg, reg;
3660         uint32_t max_high_water;
3661         uint8_t i, aq_failure;
3662         int err;
3663         struct i40e_hw *hw;
3664         struct i40e_pf *pf;
3665         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3666                 [RTE_FC_NONE] = I40E_FC_NONE,
3667                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3668                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3669                 [RTE_FC_FULL] = I40E_FC_FULL
3670         };
3671
3672         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
3673
3674         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3675         if ((fc_conf->high_water > max_high_water) ||
3676                         (fc_conf->high_water < fc_conf->low_water)) {
3677                 PMD_INIT_LOG(ERR,
3678                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3679                         max_high_water);
3680                 return -EINVAL;
3681         }
3682
3683         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3684         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3685         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3686
3687         pf->fc_conf.pause_time = fc_conf->pause_time;
3688         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3689         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3690
3691         PMD_INIT_FUNC_TRACE();
3692
3693         /* All the link-flow-control-related enable/disable register
3694          * configuration is handled by the F/W
3695          */
3696         err = i40e_set_fc(hw, &aq_failure, true);
3697         if (err < 0)
3698                 return -ENOSYS;
3699
3700         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3701                 /* Configure flow control refresh threshold,
3702                  * the value for stat_tx_pause_refresh_timer[8]
3703                  * is used for global pause operation.
3704                  */
3705
3706                 I40E_WRITE_REG(hw,
3707                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3708                                pf->fc_conf.pause_time);
3709
3710                 /* configure the timer value included in transmitted pause
3711                  * frame,
3712                  * the value for stat_tx_pause_quanta[8] is used for global
3713                  * pause operation
3714                  */
3715                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3716                                pf->fc_conf.pause_time);
3717
3718                 fctrl_reg = I40E_READ_REG(hw,
3719                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3720
3721                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3722                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3723                 else
3724                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3725
3726                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3727                                fctrl_reg);
3728         } else {
3729                 /* Configure pause time (2 TCs per register) */
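                /* Multiplying by 0x00010001 replicates the 16-bit pause
                 * time into both TC halves of the register.
                 */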
3730                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3731                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3732                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3733
3734                 /* Configure flow control refresh threshold value */
3735                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3736                                pf->fc_conf.pause_time / 2);
3737
3738                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3739
3740                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
3741                  * depending on the configuration
3742                  */
3743                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3744                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3745                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3746                 } else {
3747                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3748                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3749                 }
3750
3751                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3752         }
3753
3754         if (!pf->support_multi_driver) {
3755                 /* Configure watermarks based on both packets and bytes */
3756                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3757                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3758                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3759                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3760                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3761                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3762                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3763                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3764                                   << I40E_KILOSHIFT);
3765                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3766                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3767                                    << I40E_KILOSHIFT);
3768                 i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
3769         } else {
3770                 PMD_DRV_LOG(ERR,
3771                             "Watermark configuration is not supported in multi-driver mode.");
3772         }
3773
3774         I40E_WRITE_FLUSH(hw);
3775
3776         return 0;
3777 }
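
/*
 * Usage sketch from the application side (illustrative values; high_water
 * and low_water are in KB). rte_eth_dev_flow_ctrl_set() reaches the
 * callback above through the dev_ops table:
 *
 *     struct rte_eth_fc_conf fc = {
 *             .mode = RTE_FC_FULL,
 *             .pause_time = 0xFFFF,
 *             .high_water = 576,
 *             .low_water = 512,
 *     };
 *     ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */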
3778
3779 static int
3780 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3781                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3782 {
3783         PMD_INIT_FUNC_TRACE();
3784
3785         return -ENOSYS;
3786 }
3787
3788 /* Add a MAC address, and update filters */
3789 static int
3790 i40e_macaddr_add(struct rte_eth_dev *dev,
3791                  struct ether_addr *mac_addr,
3792                  __rte_unused uint32_t index,
3793                  uint32_t pool)
3794 {
3795         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3796         struct i40e_mac_filter_info mac_filter;
3797         struct i40e_vsi *vsi;
3798         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
3799         int ret;
3800
3801         /* If VMDQ not enabled or configured, return */
3802         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3803                           !pf->nb_cfg_vmdq_vsi)) {
3804                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3805                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3806                         pool);
3807                 return -ENOTSUP;
3808         }
3809
3810         if (pool > pf->nb_cfg_vmdq_vsi) {
3811                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3812                                 pool, pf->nb_cfg_vmdq_vsi);
3813                 return -EINVAL;
3814         }
3815
3816         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3817         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3818                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3819         else
3820                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3821
3822         if (pool == 0)
3823                 vsi = pf->main_vsi;
3824         else
3825                 vsi = pf->vmdq[pool - 1].vsi;
3826
3827         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3828         if (ret != I40E_SUCCESS) {
3829                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3830                 return -ENODEV;
3831         }
3832         return 0;
3833 }
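
/*
 * Sketch: applications typically reach the callback above through
 * rte_eth_dev_mac_addr_add(); pool 0 targets the main VSI, pool N > 0 the
 * (N - 1)th configured VMDq VSI. Illustrative, locally administered address:
 *
 *     struct ether_addr addr = {
 *             .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *     ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */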
3834
3835 /* Remove a MAC address, and update filters */
3836 static void
3837 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3838 {
3839         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3840         struct i40e_vsi *vsi;
3841         struct rte_eth_dev_data *data = dev->data;
3842         struct ether_addr *macaddr;
3843         int ret;
3844         uint32_t i;
3845         uint64_t pool_sel;
3846
3847         macaddr = &(data->mac_addrs[index]);
3848
3849         pool_sel = dev->data->mac_pool_sel[index];
3850
3851         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3852                 if (pool_sel & (1ULL << i)) {
3853                         if (i == 0)
3854                                 vsi = pf->main_vsi;
3855                         else {
3856                                 /* No VMDQ pool enabled or configured */
3857                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3858                                         (i > pf->nb_cfg_vmdq_vsi)) {
3859                                         PMD_DRV_LOG(ERR,
3860                                                 "No VMDQ pool enabled/configured");
3861                                         return;
3862                                 }
3863                                 vsi = pf->vmdq[i - 1].vsi;
3864                         }
3865                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3866
3867                         if (ret) {
3868                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3869                                 return;
3870                         }
3871                 }
3872         }
3873 }
3874
3875 /* Set perfect match or hash match of MAC and VLAN for a VF */
3876 static int
3877 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3878                  struct rte_eth_mac_filter *filter,
3879                  bool add)
3880 {
3881         struct i40e_hw *hw;
3882         struct i40e_mac_filter_info mac_filter;
3883         struct ether_addr old_mac;
3884         struct ether_addr *new_mac;
3885         struct i40e_pf_vf *vf = NULL;
3886         uint16_t vf_id;
3887         int ret;
3888
3889         if (pf == NULL) {
3890                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3891                 return -EINVAL;
3892         }
3893         hw = I40E_PF_TO_HW(pf);
3894
3895         if (filter == NULL) {
3896                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3897                 return -EINVAL;
3898         }
3899
3900         new_mac = &filter->mac_addr;
3901
3902         if (is_zero_ether_addr(new_mac)) {
3903                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3904                 return -EINVAL;
3905         }
3906
3907         vf_id = filter->dst_id;
3908
3909         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3910                 PMD_DRV_LOG(ERR, "Invalid argument.");
3911                 return -EINVAL;
3912         }
3913         vf = &pf->vfs[vf_id];
3914
3915         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3916                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3917                 return -EINVAL;
3918         }
3919
3920         if (add) {
3921                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3922                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3923                                 ETHER_ADDR_LEN);
3924                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3925                                  ETHER_ADDR_LEN);
3926
3927                 mac_filter.filter_type = filter->filter_type;
3928                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3929                 if (ret != I40E_SUCCESS) {
3930                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3931                         return -1;
3932                 }
3933                 ether_addr_copy(new_mac, &pf->dev_addr);
3934         } else {
3935                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3936                                 ETHER_ADDR_LEN);
3937                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3938                 if (ret != I40E_SUCCESS) {
3939                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3940                         return -1;
3941                 }
3942
3943                 /* Clear device address as it has been removed */
3944                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3945                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3946         }
3947
3948         return 0;
3949 }
3950
3951 /* MAC filter handle */
3952 static int
3953 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3954                 void *arg)
3955 {
3956         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3957         struct rte_eth_mac_filter *filter;
3958         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3959         int ret = I40E_NOT_SUPPORTED;
3960
3961         filter = (struct rte_eth_mac_filter *)(arg);
3962
3963         switch (filter_op) {
3964         case RTE_ETH_FILTER_NOP:
3965                 ret = I40E_SUCCESS;
3966                 break;
3967         case RTE_ETH_FILTER_ADD:
3968                 i40e_pf_disable_irq0(hw);
3969                 if (filter->is_vf)
3970                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3971                 i40e_pf_enable_irq0(hw);
3972                 break;
3973         case RTE_ETH_FILTER_DELETE:
3974                 i40e_pf_disable_irq0(hw);
3975                 if (filter->is_vf)
3976                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3977                 i40e_pf_enable_irq0(hw);
3978                 break;
3979         default:
3980                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3981                 ret = I40E_ERR_PARAM;
3982                 break;
3983         }
3984
3985         return ret;
3986 }
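
/*
 * Sketch of the legacy filter API path (assuming this handler is wired to
 * RTE_ETH_FILTER_MACVLAN in the driver's filter_ctrl callback); dst_id
 * selects the target VF:
 *
 *     struct rte_eth_mac_filter f = { .is_vf = 1, .dst_id = 0,
 *                                     .filter_type = RTE_MAC_PERFECT_MATCH };
 *     memcpy(&f.mac_addr, &addr, ETHER_ADDR_LEN);
 *     ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_MACVLAN,
 *                                   RTE_ETH_FILTER_ADD, &f);
 */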
3987
3988 static int
3989 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3990 {
3991         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3992         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3993         uint32_t reg;
3994         int ret;
3995
3996         if (!lut)
3997                 return -EINVAL;
3998
3999         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4000                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
4001                                           lut, lut_size);
4002                 if (ret) {
4003                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4004                         return ret;
4005                 }
4006         } else {
4007                 uint32_t *lut_dw = (uint32_t *)lut;
4008                 uint16_t i, lut_size_dw = lut_size / 4;
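                /*
                 * Each 32-bit HLUT register packs four one-byte LUT entries,
                 * so the byte-sized LUT is walked as lut_size / 4 dwords.
                 */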
4009
4010                 if (vsi->type == I40E_VSI_SRIOV) {
4011                         for (i = 0; i < lut_size_dw; i++) {
4012                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4013                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4014                         }
4015                 } else {
4016                         for (i = 0; i < lut_size_dw; i++)
4017                                 lut_dw[i] = I40E_READ_REG(hw,
4018                                                           I40E_PFQF_HLUT(i));
4019                 }
4020         }
4021
4022         return 0;
4023 }
4024
4025 int
4026 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4027 {
4028         struct i40e_pf *pf;
4029         struct i40e_hw *hw;
4030         int ret;
4031
4032         if (!vsi || !lut)
4033                 return -EINVAL;
4034
4035         pf = I40E_VSI_TO_PF(vsi);
4036         hw = I40E_VSI_TO_HW(vsi);
4037
4038         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4039                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
4040                                           lut, lut_size);
4041                 if (ret) {
4042                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4043                         return ret;
4044                 }
4045         } else {
4046                 uint32_t *lut_dw = (uint32_t *)lut;
4047                 uint16_t i, lut_size_dw = lut_size / 4;
4048
4049                 if (vsi->type == I40E_VSI_SRIOV) {
4050                         for (i = 0; i < lut_size_dw; i++)
4051                                 I40E_WRITE_REG(
4052                                         hw,
4053                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4054                                         lut_dw[i]);
4055                 } else {
4056                         for (i = 0; i < lut_size_dw; i++)
4057                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4058                                                lut_dw[i]);
4059                 }
4060                 I40E_WRITE_FLUSH(hw);
4061         }
4062
4063         return 0;
4064 }
4065
4066 static int
4067 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4068                          struct rte_eth_rss_reta_entry64 *reta_conf,
4069                          uint16_t reta_size)
4070 {
4071         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4072         uint16_t i, lut_size = pf->hash_lut_size;
4073         uint16_t idx, shift;
4074         uint8_t *lut;
4075         int ret;
4076
4077         if (reta_size != lut_size ||
4078                 reta_size > ETH_RSS_RETA_SIZE_512) {
4079                 PMD_DRV_LOG(ERR,
4080                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4081                         reta_size, lut_size);
4082                 return -EINVAL;
4083         }
4084
4085         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4086         if (!lut) {
4087                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4088                 return -ENOMEM;
4089         }
4090         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4091         if (ret)
4092                 goto out;
4093         for (i = 0; i < reta_size; i++) {
4094                 idx = i / RTE_RETA_GROUP_SIZE;
4095                 shift = i % RTE_RETA_GROUP_SIZE;
4096                 if (reta_conf[idx].mask & (1ULL << shift))
4097                         lut[i] = reta_conf[idx].reta[shift];
4098         }
4099         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4100
4101 out:
4102         rte_free(lut);
4103
4104         return ret;
4105 }
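
/*
 * Application-side sketch (illustrative; assumes the configured LUT size is
 * 512): program a RETA that alternates between queues 0 and 1. The mask/reta
 * indexing mirrors the idx/shift arithmetic above:
 *
 *     struct rte_eth_rss_reta_entry64 reta[512 / RTE_RETA_GROUP_SIZE];
 *     memset(reta, 0, sizeof(reta));
 *     for (int i = 0; i < 512; i++) {
 *             reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                             1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                             i & 1;
 *     }
 *     ret = rte_eth_dev_rss_reta_update(port_id, reta, 512);
 */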
4106
4107 static int
4108 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4109                         struct rte_eth_rss_reta_entry64 *reta_conf,
4110                         uint16_t reta_size)
4111 {
4112         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4113         uint16_t i, lut_size = pf->hash_lut_size;
4114         uint16_t idx, shift;
4115         uint8_t *lut;
4116         int ret;
4117
4118         if (reta_size != lut_size ||
4119                 reta_size > ETH_RSS_RETA_SIZE_512) {
4120                 PMD_DRV_LOG(ERR,
4121                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4122                         reta_size, lut_size);
4123                 return -EINVAL;
4124         }
4125
4126         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4127         if (!lut) {
4128                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4129                 return -ENOMEM;
4130         }
4131
4132         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4133         if (ret)
4134                 goto out;
4135         for (i = 0; i < reta_size; i++) {
4136                 idx = i / RTE_RETA_GROUP_SIZE;
4137                 shift = i % RTE_RETA_GROUP_SIZE;
4138                 if (reta_conf[idx].mask & (1ULL << shift))
4139                         reta_conf[idx].reta[shift] = lut[i];
4140         }
4141
4142 out:
4143         rte_free(lut);
4144
4145         return ret;
4146 }
4147
4148 /**
4149  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4150  * @hw:   pointer to the HW structure
4151  * @mem:  pointer to mem struct to fill out
4152  * @size: size of memory requested
4153  * @alignment: what to align the allocation to
4154  **/
4155 enum i40e_status_code
4156 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4157                         struct i40e_dma_mem *mem,
4158                         u64 size,
4159                         u32 alignment)
4160 {
4161         const struct rte_memzone *mz = NULL;
4162         char z_name[RTE_MEMZONE_NAMESIZE];
4163
4164         if (!mem)
4165                 return I40E_ERR_PARAM;
4166
4167         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4168         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4169                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4170         if (!mz)
4171                 return I40E_ERR_NO_MEMORY;
4172
4173         mem->size = size;
4174         mem->va = mz->addr;
4175         mem->pa = mz->iova;
4176         mem->zone = (const void *)mz;
4177         PMD_DRV_LOG(DEBUG,
4178                 "memzone %s allocated with physical address: %"PRIu64,
4179                 mz->name, mem->pa);
4180
4181         return I40E_SUCCESS;
4182 }
4183
4184 /**
4185  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4186  * @hw:   pointer to the HW structure
4187  * @mem:  ptr to mem struct to free
4188  **/
4189 enum i40e_status_code
4190 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4191                     struct i40e_dma_mem *mem)
4192 {
4193         if (!mem)
4194                 return I40E_ERR_PARAM;
4195
4196         PMD_DRV_LOG(DEBUG,
4197                 "memzone %s to be freed with physical address: %"PRIu64,
4198                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4199         rte_memzone_free((const struct rte_memzone *)mem->zone);
4200         mem->zone = NULL;
4201         mem->va = NULL;
4202         mem->pa = (u64)0;
4203
4204         return I40E_SUCCESS;
4205 }
4206
4207 /**
4208  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4209  * @hw:   pointer to the HW structure
4210  * @mem:  pointer to mem struct to fill out
4211  * @size: size of memory requested
4212  **/
4213 enum i40e_status_code
4214 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4215                          struct i40e_virt_mem *mem,
4216                          u32 size)
4217 {
4218         if (!mem)
4219                 return I40E_ERR_PARAM;
4220
4221         mem->size = size;
4222         mem->va = rte_zmalloc("i40e", size, 0);
4223
4224         if (mem->va)
4225                 return I40E_SUCCESS;
4226         else
4227                 return I40E_ERR_NO_MEMORY;
4228 }
4229
4230 /**
4231  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4232  * @hw:   pointer to the HW structure
4233  * @mem:  pointer to mem struct to free
4234  **/
4235 enum i40e_status_code
4236 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4237                      struct i40e_virt_mem *mem)
4238 {
4239         if (!mem)
4240                 return I40E_ERR_PARAM;
4241
4242         rte_free(mem->va);
4243         mem->va = NULL;
4244
4245         return I40E_SUCCESS;
4246 }
4247
4248 void
4249 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4250 {
4251         rte_spinlock_init(&sp->spinlock);
4252 }
4253
4254 void
4255 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4256 {
4257         rte_spinlock_lock(&sp->spinlock);
4258 }
4259
4260 void
4261 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4262 {
4263         rte_spinlock_unlock(&sp->spinlock);
4264 }
4265
4266 void
4267 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4268 {
4269         return;
4270 }
4271
4272 /**
4273  * Get the hardware capabilities, which will be parsed
4274  * and saved into struct i40e_hw.
4275  */
4276 static int
4277 i40e_get_cap(struct i40e_hw *hw)
4278 {
4279         struct i40e_aqc_list_capabilities_element_resp *buf;
4280         uint16_t len, size = 0;
4281         int ret;
4282
4283         /* Calculate a buffer large enough to hold the response data temporarily */
4284         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4285                                                 I40E_MAX_CAP_ELE_NUM;
4286         buf = rte_zmalloc("i40e", len, 0);
4287         if (!buf) {
4288                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4289                 return I40E_ERR_NO_MEMORY;
4290         }
4291
4292         /* Get and parse the capabilities, then save them to hw */
4293         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4294                         i40e_aqc_opc_list_func_capabilities, NULL);
4295         if (ret != I40E_SUCCESS)
4296                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4297
4298         /* Free the temporary buffer after being used */
4299         rte_free(buf);
4300
4301         return ret;
4302 }
4303
4304 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4305 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
4306
4307 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4308                 const char *value,
4309                 void *opaque)
4310 {
4311         struct i40e_pf *pf;
4312         unsigned long num;
4313         char *end;
4314
4315         pf = (struct i40e_pf *)opaque;
4316         RTE_SET_USED(key);
4317
4318         errno = 0;
4319         num = strtoul(value, &end, 0);
4320         if (errno != 0 || end == value || *end != 0) {
4321                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4322                             "the current value = %hu", value, pf->vf_nb_qp_max);
4323                 return -(EINVAL);
4324         }
4325
4326         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4327                 pf->vf_nb_qp_max = (uint16_t)num;
4328         else
4329                 /* Return 0 so a later valid instance of this argument still works */
4330                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be "
4331                             "a power of 2 and no greater than 16; keeping "
4332                             "the current value = %hu", num, pf->vf_nb_qp_max);
4333
4334         return 0;
4335 }
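
/*
 * Devargs sketch: the handler above runs when the PF is probed with the
 * queue-num-per-vf key, e.g. (hypothetical PCI address):
 *
 *     testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i
 */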
4336
4337 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4338 {
4339         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4340         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4341         struct rte_kvargs *kvlist;
4342
4343         /* set default queue number per VF as 4 */
4344         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4345
4346         if (dev->device->devargs == NULL)
4347                 return 0;
4348
4349         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4350         if (kvlist == NULL)
4351                 return -(EINVAL);
4352
4353         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4354                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; "
4355                             "only the first invalid or the last valid "
4356                             "occurrence takes effect", QUEUE_NUM_PER_VF_ARG);
4357
4358         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4359                            i40e_pf_parse_vf_queue_number_handler, pf);
4360
4361         rte_kvargs_free(kvlist);
4362
4363         return 0;
4364 }
4365
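/*
 * Queue-pair budgeting sketch (summarizing the code below): queue pairs are
 * laid out contiguously as [FDIR][LAN][VF 0..N][VMDq 0..M]; each *_qp_offset
 * marks where the next block starts, and the accumulated qp_count/vsi_count
 * are validated against the HW capabilities at the end.
 */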
4366 static int
4367 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4368 {
4369         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4370         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4371         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4372         uint16_t qp_count = 0, vsi_count = 0;
4373
4374         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4375                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4376                 return -EINVAL;
4377         }
4378
4379         i40e_pf_config_vf_rxq_number(dev);
4380
4381         /* Add the parameter init for LFC */
4382         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4383         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4384         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4385
4386         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4387         pf->max_num_vsi = hw->func_caps.num_vsis;
4388         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4389         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4390
4391         /* FDir queue/VSI allocation */
4392         pf->fdir_qp_offset = 0;
4393         if (hw->func_caps.fd) {
4394                 pf->flags |= I40E_FLAG_FDIR;
4395                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4396         } else {
4397                 pf->fdir_nb_qps = 0;
4398         }
4399         qp_count += pf->fdir_nb_qps;
4400         vsi_count += 1;
4401
4402         /* LAN queue/VSI allocation */
4403         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4404         if (!hw->func_caps.rss) {
4405                 pf->lan_nb_qps = 1;
4406         } else {
4407                 pf->flags |= I40E_FLAG_RSS;
4408                 if (hw->mac.type == I40E_MAC_X722)
4409                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4410                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4411         }
4412         qp_count += pf->lan_nb_qps;
4413         vsi_count += 1;
4414
4415         /* VF queue/VSI allocation */
4416         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4417         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4418                 pf->flags |= I40E_FLAG_SRIOV;
4419                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4420                 pf->vf_num = pci_dev->max_vfs;
4421                 PMD_DRV_LOG(DEBUG,
4422                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4423                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4424         } else {
4425                 pf->vf_nb_qps = 0;
4426                 pf->vf_num = 0;
4427         }
4428         qp_count += pf->vf_nb_qps * pf->vf_num;
4429         vsi_count += pf->vf_num;
4430
4431         /* VMDq queue/VSI allocation */
4432         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4433         pf->vmdq_nb_qps = 0;
4434         pf->max_nb_vmdq_vsi = 0;
4435         if (hw->func_caps.vmdq) {
4436                 if (qp_count < hw->func_caps.num_tx_qp &&
4437                         vsi_count < hw->func_caps.num_vsis) {
4438                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4439                                 qp_count) / pf->vmdq_nb_qp_max;
4440
4441                         /* Limit the maximum number of VMDq VSIs to what
4442                          * the ethdev layer can support
4443                          */
4444                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4445                                 hw->func_caps.num_vsis - vsi_count);
4446                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4447                                 ETH_64_POOLS);
4448                         if (pf->max_nb_vmdq_vsi) {
4449                                 pf->flags |= I40E_FLAG_VMDQ;
4450                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4451                                 PMD_DRV_LOG(DEBUG,
4452                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4453                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4454                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4455                         } else {
4456                                 PMD_DRV_LOG(INFO,
4457                                         "Not enough queues left for VMDq");
4458                         }
4459                 } else {
4460                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4461                 }
4462         }
4463         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4464         vsi_count += pf->max_nb_vmdq_vsi;
4465
4466         if (hw->func_caps.dcb)
4467                 pf->flags |= I40E_FLAG_DCB;
4468
4469         if (qp_count > hw->func_caps.num_tx_qp) {
4470                 PMD_DRV_LOG(ERR,
4471                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4472                         qp_count, hw->func_caps.num_tx_qp);
4473                 return -EINVAL;
4474         }
4475         if (vsi_count > hw->func_caps.num_vsis) {
4476                 PMD_DRV_LOG(ERR,
4477                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4478                         vsi_count, hw->func_caps.num_vsis);
4479                 return -EINVAL;
4480         }
4481
4482         return 0;
4483 }
4484
4485 static int
4486 i40e_pf_get_switch_config(struct i40e_pf *pf)
4487 {
4488         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4489         struct i40e_aqc_get_switch_config_resp *switch_config;
4490         struct i40e_aqc_switch_config_element_resp *element;
4491         uint16_t start_seid = 0, num_reported;
4492         int ret;
4493
4494         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4495                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4496         if (!switch_config) {
4497                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4498                 return -ENOMEM;
4499         }
4500
4501         /* Get the switch configurations */
4502         ret = i40e_aq_get_switch_config(hw, switch_config,
4503                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4504         if (ret != I40E_SUCCESS) {
4505                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4506                 goto fail;
4507         }
4508         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4509         if (num_reported != 1) { /* The number should be 1 */
4510                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4511                 goto fail;
4512         }
4513
4514         /* Parse the switch configuration elements */
4515         element = &(switch_config->element[0]);
4516         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4517                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4518                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4519         } else
4520                 PMD_DRV_LOG(INFO, "Unknown element type");
4521
4522 fail:
4523         rte_free(switch_config);
4524
4525         return ret;
4526 }
4527
4528 static int
4529 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4530                         uint32_t num)
4531 {
4532         struct pool_entry *entry;
4533
4534         if (pool == NULL || num == 0)
4535                 return -EINVAL;
4536
4537         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4538         if (entry == NULL) {
4539                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4540                 return -ENOMEM;
4541         }
4542
4543         /* Initialize the queue heap */
4544         pool->num_free = num;
4545         pool->num_alloc = 0;
4546         pool->base = base;
4547         LIST_INIT(&pool->alloc_list);
4548         LIST_INIT(&pool->free_list);
4549
4550         /* Initialize the element */
4551         entry->base = 0;
4552         entry->len = num;
4553
4554         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4555         return 0;
4556 }
4557
4558 static void
4559 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4560 {
4561         struct pool_entry *entry, *next_entry;
4562
4563         if (pool == NULL)
4564                 return;
4565
4566         for (entry = LIST_FIRST(&pool->alloc_list);
4567                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4568                         entry = next_entry) {
4569                 LIST_REMOVE(entry, next);
4570                 rte_free(entry);
4571         }
4572
4573         for (entry = LIST_FIRST(&pool->free_list);
4574                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4575                         entry = next_entry) {
4576                 LIST_REMOVE(entry, next);
4577                 rte_free(entry);
4578         }
4579
4580         pool->num_free = 0;
4581         pool->num_alloc = 0;
4582         pool->base = 0;
4583         LIST_INIT(&pool->alloc_list);
4584         LIST_INIT(&pool->free_list);
4585 }
4586
4587 static int
4588 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4589                        uint32_t base)
4590 {
4591         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4592         uint32_t pool_offset;
4593         int insert;
4594
4595         if (pool == NULL) {
4596                 PMD_DRV_LOG(ERR, "Invalid parameter");
4597                 return -EINVAL;
4598         }
4599
4600         pool_offset = base - pool->base;
4601         /* Lookup in alloc list */
4602         LIST_FOREACH(entry, &pool->alloc_list, next) {
4603                 if (entry->base == pool_offset) {
4604                         valid_entry = entry;
4605                         LIST_REMOVE(entry, next);
4606                         break;
4607                 }
4608         }
4609
4610         /* Not found, return */
4611         if (valid_entry == NULL) {
4612                 PMD_DRV_LOG(ERR, "Failed to find entry");
4613                 return -EINVAL;
4614         }
4615
4616         /**
4617          * Found it; move it to the free list and try to merge.
4618          * To make merging easier, the free list is kept sorted by base.
4619          * Find the adjacent prev and next entries.
4620          */
4621         prev = next = NULL;
4622         LIST_FOREACH(entry, &pool->free_list, next) {
4623                 if (entry->base > valid_entry->base) {
4624                         next = entry;
4625                         break;
4626                 }
4627                 prev = entry;
4628         }
4629
4630         insert = 0;
4631         /* Try to merge with the next one */
4632         if (next != NULL) {
4633                 /* Merge with next one */
4634                 if (valid_entry->base + valid_entry->len == next->base) {
4635                         next->base = valid_entry->base;
4636                         next->len += valid_entry->len;
4637                         rte_free(valid_entry);
4638                         valid_entry = next;
4639                         insert = 1;
4640                 }
4641         }
4642
4643         if (prev != NULL) {
4644                 /* Merge with previous one */
4645                 if (prev->base + prev->len == valid_entry->base) {
4646                         prev->len += valid_entry->len;
4647                         /* If it merged with the next one, remove the next node */
4648                         if (insert == 1) {
4649                                 LIST_REMOVE(valid_entry, next);
4650                                 rte_free(valid_entry);
4651                         } else {
4652                                 rte_free(valid_entry);
4653                                 insert = 1;
4654                         }
4655                 }
4656         }
4657
4658         /* No entry found to merge with, so insert */
4659         if (insert == 0) {
4660                 if (prev != NULL)
4661                         LIST_INSERT_AFTER(prev, valid_entry, next);
4662                 else if (next != NULL)
4663                         LIST_INSERT_BEFORE(next, valid_entry, next);
4664                 else /* It's empty list, insert to head */
4665                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4666         }
4667
4668         pool->num_free += valid_entry->len;
4669         pool->num_alloc -= valid_entry->len;
4670
4671         return 0;
4672 }
4673
4674 static int
4675 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4676                        uint16_t num)
4677 {
4678         struct pool_entry *entry, *valid_entry;
4679
4680         if (pool == NULL || num == 0) {
4681                 PMD_DRV_LOG(ERR, "Invalid parameter");
4682                 return -EINVAL;
4683         }
4684
4685         if (pool->num_free < num) {
4686                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4687                             num, pool->num_free);
4688                 return -ENOMEM;
4689         }
4690
4691         valid_entry = NULL;
4692         /* Look up the free list and find the best-fit entry */
4693         LIST_FOREACH(entry, &pool->free_list, next) {
4694                 if (entry->len >= num) {
4695                         /* Find best one */
4696                         if (entry->len == num) {
4697                                 valid_entry = entry;
4698                                 break;
4699                         }
4700                         if (valid_entry == NULL || valid_entry->len > entry->len)
4701                                 valid_entry = entry;
4702                 }
4703         }
4704
4705         /* No entry found to satisfy the request, return */
4706         if (valid_entry == NULL) {
4707                 PMD_DRV_LOG(ERR, "No valid entry found");
4708                 return -ENOMEM;
4709         }
4710         /**
4711          * The entry has exactly as many queues as requested;
4712          * remove it from the free list.
4713          */
4714         if (valid_entry->len == num) {
4715                 LIST_REMOVE(valid_entry, next);
4716         } else {
4717                 /**
4718                  * The entry has more queues than requested;
4719                  * create a new entry for the alloc list and shrink the
4720                  * base/length of the remaining free-list entry.
4721                  */
4722                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4723                 if (entry == NULL) {
4724                         PMD_DRV_LOG(ERR,
4725                                 "Failed to allocate memory for resource pool");
4726                         return -ENOMEM;
4727                 }
4728                 entry->base = valid_entry->base;
4729                 entry->len = num;
4730                 valid_entry->base += num;
4731                 valid_entry->len -= num;
4732                 valid_entry = entry;
4733         }
4734
4735         /* Insert it into alloc list, not sorted */
4736         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4737
4738         pool->num_free -= valid_entry->len;
4739         pool->num_alloc += valid_entry->len;
4740
4741         return valid_entry->base + pool->base;
4742 }
4743
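/*
 * Pool behaviour sketch: after i40e_res_pool_init(&pool, 0, 64),
 * i40e_res_pool_alloc(&pool, 16) returns base 0 and leaves [16, 64) on the
 * free list; a later i40e_res_pool_free(&pool, 0) merges the range back into
 * a single free entry covering [0, 64).
 */
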
4744 /**
4745  * bitmap_is_subset - Check whether src2 is subset of src1
4746  **/
4747 static inline int
4748 bitmap_is_subset(uint8_t src1, uint8_t src2)
4749 {
4750         return !((src1 ^ src2) & src2);
4751 }
4752
4753 static enum i40e_status_code
4754 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4755 {
4756         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4757
4758         /* If DCB is not supported, only default TC is supported */
4759         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4760                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4761                 return I40E_NOT_SUPPORTED;
4762         }
4763
4764         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4765                 PMD_DRV_LOG(ERR,
4766                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4767                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4768                 return I40E_NOT_SUPPORTED;
4769         }
4770         return I40E_SUCCESS;
4771 }
4772
4773 int
4774 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4775                                 struct i40e_vsi_vlan_pvid_info *info)
4776 {
4777         struct i40e_hw *hw;
4778         struct i40e_vsi_context ctxt;
4779         uint8_t vlan_flags = 0;
4780         int ret;
4781
4782         if (vsi == NULL || info == NULL) {
4783                 PMD_DRV_LOG(ERR, "invalid parameters");
4784                 return I40E_ERR_PARAM;
4785         }
4786
4787         if (info->on) {
4788                 vsi->info.pvid = info->config.pvid;
4789                 /**
4790                  * If PVID insertion is enabled, only tagged packets
4791                  * are allowed to be sent out.
4792                  */
4793                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4794                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4795         } else {
4796                 vsi->info.pvid = 0;
4797                 if (info->config.reject.tagged == 0)
4798                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4799
4800                 if (info->config.reject.untagged == 0)
4801                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4802         }
4803         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4804                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4805         vsi->info.port_vlan_flags |= vlan_flags;
4806         vsi->info.valid_sections =
4807                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4808         memset(&ctxt, 0, sizeof(ctxt));
4809         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4810         ctxt.seid = vsi->seid;
4811
4812         hw = I40E_VSI_TO_HW(vsi);
4813         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4814         if (ret != I40E_SUCCESS)
4815                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4816
4817         return ret;
4818 }
4819
4820 static int
4821 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4822 {
4823         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4824         int i, ret;
4825         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4826
4827         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4828         if (ret != I40E_SUCCESS)
4829                 return ret;
4830
4831         if (!vsi->seid) {
4832                 PMD_DRV_LOG(ERR, "seid not valid");
4833                 return -EINVAL;
4834         }
4835
4836         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4837         tc_bw_data.tc_valid_bits = enabled_tcmap;
4838         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4839                 tc_bw_data.tc_bw_credits[i] =
4840                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4841
4842         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4843         if (ret != I40E_SUCCESS) {
4844                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4845                 return ret;
4846         }
4847
4848         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4849                                         sizeof(vsi->info.qs_handle));
4850         return I40E_SUCCESS;
4851 }
4852
4853 static enum i40e_status_code
4854 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4855                                  struct i40e_aqc_vsi_properties_data *info,
4856                                  uint8_t enabled_tcmap)
4857 {
4858         enum i40e_status_code ret;
4859         int i, total_tc = 0;
4860         uint16_t qpnum_per_tc, bsf, qp_idx;
4861
4862         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4863         if (ret != I40E_SUCCESS)
4864                 return ret;
4865
4866         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4867                 if (enabled_tcmap & (1 << i))
4868                         total_tc++;
4869         if (total_tc == 0)
4870                 total_tc = 1;
4871         vsi->enabled_tc = enabled_tcmap;
4872
4873         /* Number of queues per enabled TC */
4874         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4875         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4876         bsf = rte_bsf32(qpnum_per_tc);
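        /*
         * i40e_align_floor() has made qpnum_per_tc a power of two, so it can
         * be encoded as an exponent: e.g. 16 queues per TC yields bsf = 4 in
         * the tc_mapping words built below.
         */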
4877
4878         /* Adjust the queue number to actual queues that can be applied */
4879         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4880                 vsi->nb_qps = qpnum_per_tc * total_tc;
4881
4882         /**
4883          * Configure TC and queue mapping parameters: for each enabled TC,
4884          * allocate qpnum_per_tc queues to it; disabled TCs are served by
4885          * the default queue.
4886          */
4887         qp_idx = 0;
4888         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4889                 if (vsi->enabled_tc & (1 << i)) {
4890                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4891                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4892                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4893                         qp_idx += qpnum_per_tc;
4894                 } else
4895                         info->tc_mapping[i] = 0;
4896         }
4897
4898         /* Associate queue number with VSI */
4899         if (vsi->type == I40E_VSI_SRIOV) {
4900                 info->mapping_flags |=
4901                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4902                 for (i = 0; i < vsi->nb_qps; i++)
4903                         info->queue_mapping[i] =
4904                                 rte_cpu_to_le_16(vsi->base_queue + i);
4905         } else {
4906                 info->mapping_flags |=
4907                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4908                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4909         }
4910         info->valid_sections |=
4911                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4912
4913         return I40E_SUCCESS;
4914 }
4915
4916 static int
4917 i40e_veb_release(struct i40e_veb *veb)
4918 {
4919         struct i40e_vsi *vsi;
4920         struct i40e_hw *hw;
4921
4922         if (veb == NULL)
4923                 return -EINVAL;
4924
4925         if (!TAILQ_EMPTY(&veb->head)) {
4926                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4927                 return -EACCES;
4928         }
4929         /* associate_vsi field is NULL for floating VEB */
4930         if (veb->associate_vsi != NULL) {
4931                 vsi = veb->associate_vsi;
4932                 hw = I40E_VSI_TO_HW(vsi);
4933
4934                 vsi->uplink_seid = veb->uplink_seid;
4935                 vsi->veb = NULL;
4936         } else {
4937                 veb->associate_pf->main_vsi->floating_veb = NULL;
4938                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4939         }
4940
4941         i40e_aq_delete_element(hw, veb->seid, NULL);
4942         rte_free(veb);
4943         return I40E_SUCCESS;
4944 }
4945
4946 /* Setup a veb */
4947 static struct i40e_veb *
4948 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4949 {
4950         struct i40e_veb *veb;
4951         int ret;
4952         struct i40e_hw *hw;
4953
4954         if (pf == NULL) {
4955                 PMD_DRV_LOG(ERR,
4956                             "veb setup failed, associated PF shouldn't be NULL");
4957                 return NULL;
4958         }
4959         hw = I40E_PF_TO_HW(pf);
4960
4961         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4962         if (!veb) {
4963                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4964                 goto fail;
4965         }
4966
4967         veb->associate_vsi = vsi;
4968         veb->associate_pf = pf;
4969         TAILQ_INIT(&veb->head);
4970         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4971
4972         /* create floating veb if vsi is NULL */
4973         if (vsi != NULL) {
4974                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4975                                       I40E_DEFAULT_TCMAP, false,
4976                                       &veb->seid, false, NULL);
4977         } else {
4978                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4979                                       true, &veb->seid, false, NULL);
4980         }
4981
4982         if (ret != I40E_SUCCESS) {
4983                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4984                             hw->aq.asq_last_status);
4985                 goto fail;
4986         }
4987         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4988
4989         /* get statistics index */
4990         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4991                                 &veb->stats_idx, NULL, NULL, NULL);
4992         if (ret != I40E_SUCCESS) {
4993                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4994                             hw->aq.asq_last_status);
4995                 goto fail;
4996         }
4997         /* Get VEB bandwidth, to be implemented */
4998         /* The associated VSI now binds to the VEB; point its uplink at it */
4999         if (vsi)
5000                 vsi->uplink_seid = veb->seid;
5001
5002         return veb;
5003 fail:
5004         rte_free(veb);
5005         return NULL;
5006 }
5007
5008 int
5009 i40e_vsi_release(struct i40e_vsi *vsi)
5010 {
5011         struct i40e_pf *pf;
5012         struct i40e_hw *hw;
5013         struct i40e_vsi_list *vsi_list;
5014         void *temp;
5015         int ret;
5016         struct i40e_mac_filter *f;
5017         uint16_t user_param;
5018
5019         if (!vsi)
5020                 return I40E_SUCCESS;
5021
5022         if (!vsi->adapter)
5023                 return -EFAULT;
5024
5025         user_param = vsi->user_param;
5026
5027         pf = I40E_VSI_TO_PF(vsi);
5028         hw = I40E_VSI_TO_HW(vsi);
5029
5030         /* VSI has children attached; release the children first */
5031         if (vsi->veb) {
5032                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5033                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5034                                 return -1;
5035                 }
5036                 i40e_veb_release(vsi->veb);
5037         }
5038
5039         if (vsi->floating_veb) {
5040                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5041                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5042                                 return -1;
5043                 }
5044         }
5045
5046         /* Remove all macvlan filters of the VSI */
5047         i40e_vsi_remove_all_macvlan_filter(vsi);
5048         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5049                 rte_free(f);
5050
5051         if (vsi->type != I40E_VSI_MAIN &&
5052             ((vsi->type != I40E_VSI_SRIOV) ||
5053             !pf->floating_veb_list[user_param])) {
5054                 /* Remove vsi from parent's sibling list */
5055                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5056                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5057                         return I40E_ERR_PARAM;
5058                 }
5059                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5060                                 &vsi->sib_vsi_list, list);
5061
5062                 /* Remove all switch elements of the VSI */
5063                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5064                 if (ret != I40E_SUCCESS)
5065                         PMD_DRV_LOG(ERR, "Failed to delete element");
5066         }
5067
5068         if ((vsi->type == I40E_VSI_SRIOV) &&
5069             pf->floating_veb_list[user_param]) {
5070                 /* Remove vsi from parent's sibling list */
5071                 if (vsi->parent_vsi == NULL ||
5072                     vsi->parent_vsi->floating_veb == NULL) {
5073                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5074                         return I40E_ERR_PARAM;
5075                 }
5076                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5077                              &vsi->sib_vsi_list, list);
5078
5079                 /* Remove all switch elements of the VSI */
5080                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5081                 if (ret != I40E_SUCCESS)
5082                         PMD_DRV_LOG(ERR, "Failed to delete element");
5083         }
5084
5085         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5086
5087         if (vsi->type != I40E_VSI_SRIOV)
5088                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5089         rte_free(vsi);
5090
5091         return I40E_SUCCESS;
5092 }
5093
5094 static int
5095 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5096 {
5097         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5098         struct i40e_aqc_remove_macvlan_element_data def_filter;
5099         struct i40e_mac_filter_info filter;
5100         int ret;
5101
5102         if (vsi->type != I40E_VSI_MAIN)
5103                 return I40E_ERR_CONFIG;
5104         memset(&def_filter, 0, sizeof(def_filter));
5105         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5106                                         ETH_ADDR_LEN);
5107         def_filter.vlan_tag = 0;
5108         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5109                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5110         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5111         if (ret != I40E_SUCCESS) {
5112                 struct i40e_mac_filter *f;
5113                 struct ether_addr *mac;
5114
5115                 PMD_DRV_LOG(DEBUG,
5116                             "Cannot remove the default macvlan filter");
5117                 /* Need to add the permanent MAC to the MAC list */
5118                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5119                 if (f == NULL) {
5120                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5121                         return I40E_ERR_NO_MEMORY;
5122                 }
5123                 mac = &f->mac_info.mac_addr;
5124                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5125                                 ETH_ADDR_LEN);
5126                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5127                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5128                 vsi->mac_num++;
5129
5130                 return ret;
5131         }
5132         rte_memcpy(&filter.mac_addr,
5133                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5134         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5135         return i40e_vsi_add_mac(vsi, &filter);
5136 }
5137
5138 /*
5139  * i40e_vsi_get_bw_config - Query VSI BW Information
5140  * @vsi: the VSI to be queried
5141  *
5142  * Returns 0 on success, negative value on failure
5143  */
5144 static enum i40e_status_code
5145 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5146 {
5147         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5148         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5149         struct i40e_hw *hw = &vsi->adapter->hw;
5150         i40e_status ret;
5151         int i;
5152         uint32_t bw_max;
5153
5154         memset(&bw_config, 0, sizeof(bw_config));
5155         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5156         if (ret != I40E_SUCCESS) {
5157                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5158                             hw->aq.asq_last_status);
5159                 return ret;
5160         }
5161
5162         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5163         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5164                                         &ets_sla_config, NULL);
5165         if (ret != I40E_SUCCESS) {
5166                 PMD_DRV_LOG(ERR,
5167                         "VSI failed to get TC bandwidth configuration %u",
5168                         hw->aq.asq_last_status);
5169                 return ret;
5170         }
5171
5172         /* store and print out BW info */
5173         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5174         vsi->bw_info.bw_max = bw_config.max_bw;
5175         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5176         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5177         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5178                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5179                      I40E_16_BIT_WIDTH);
5180         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5181                 vsi->bw_info.bw_ets_share_credits[i] =
5182                                 ets_sla_config.share_credits[i];
5183                 vsi->bw_info.bw_ets_credits[i] =
5184                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5185                 /* 4 bits per TC, 4th bit is reserved */
5186                 vsi->bw_info.bw_ets_max[i] =
5187                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5188                                   RTE_LEN2MASK(3, uint8_t));
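                /*
                 * Illustrative decode (hypothetical register value):
                 * bw_max == 0x4321 gives TC0 max credits 1, TC1 2,
                 * TC2 3 and TC3 4; the reserved 4th bit of each nibble
                 * is masked off above.
                 */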
5189                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5190                             vsi->bw_info.bw_ets_share_credits[i]);
5191                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5192                             vsi->bw_info.bw_ets_credits[i]);
5193                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5194                             vsi->bw_info.bw_ets_max[i]);
5195         }
5196
5197         return I40E_SUCCESS;
5198 }
5199
5200 /* i40e_enable_pf_lb
5201  * @pf: pointer to the pf structure
5202  *
5203  * Allow loopback on the PF.
5204  */
5205 static inline void
5206 i40e_enable_pf_lb(struct i40e_pf *pf)
5207 {
5208         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5209         struct i40e_vsi_context ctxt;
5210         int ret;
5211
5212         /* Enabling loopback requires the FW API of v5.0 or newer */
5213         if (hw->aq.fw_maj_ver < 5) {
5214                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5215                 return;
5216         }
5217
5218         memset(&ctxt, 0, sizeof(ctxt));
5219         ctxt.seid = pf->main_vsi_seid;
5220         ctxt.pf_num = hw->pf_id;
5221         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5222         if (ret) {
5223                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5224                             ret, hw->aq.asq_last_status);
5225                 return;
5226         }
5227         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5228         ctxt.info.valid_sections =
5229                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5230         ctxt.info.switch_id |=
5231                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5232
5233         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5234         if (ret)
5235                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5236                             hw->aq.asq_last_status);
5237 }
5238
5239 /* Setup a VSI */
5240 struct i40e_vsi *
5241 i40e_vsi_setup(struct i40e_pf *pf,
5242                enum i40e_vsi_type type,
5243                struct i40e_vsi *uplink_vsi,
5244                uint16_t user_param)
5245 {
5246         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5247         struct i40e_vsi *vsi;
5248         struct i40e_mac_filter_info filter;
5249         int ret;
5250         struct i40e_vsi_context ctxt;
5251         struct ether_addr broadcast =
5252                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5253
5254         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5255             uplink_vsi == NULL) {
5256                 PMD_DRV_LOG(ERR,
5257                         "VSI setup failed, VSI link shouldn't be NULL");
5258                 return NULL;
5259         }
5260
5261         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5262                 PMD_DRV_LOG(ERR,
5263                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5264                 return NULL;
5265         }
5266
5267         /* Two situations:
5268          * 1. Type is not MAIN and the uplink VSI is not NULL:
5269          *    if the uplink VSI has no VEB yet, create one in its veb field.
5270          * 2. Type is SRIOV and the uplink is NULL:
5271          *    if no floating VEB exists yet, create one in the floating_veb field.
5272          */
5273
5274         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5275             uplink_vsi->veb == NULL) {
5276                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5277
5278                 if (uplink_vsi->veb == NULL) {
5279                         PMD_DRV_LOG(ERR, "VEB setup failed");
5280                         return NULL;
5281                 }
5282                 /* Set ALLOWLOOPBACK on the PF when the VEB is created */
5283                 i40e_enable_pf_lb(pf);
5284         }
5285
5286         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5287             pf->main_vsi->floating_veb == NULL) {
5288                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5289
5290                 if (pf->main_vsi->floating_veb == NULL) {
5291                         PMD_DRV_LOG(ERR, "VEB setup failed");
5292                         return NULL;
5293                 }
5294         }
5295
5296         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5297         if (!vsi) {
5298                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5299                 return NULL;
5300         }
5301         TAILQ_INIT(&vsi->mac_list);
5302         vsi->type = type;
5303         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5304         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5305         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5306         vsi->user_param = user_param;
5307         vsi->vlan_anti_spoof_on = 0;
5308         vsi->vlan_filter_on = 0;
5309         /* Allocate queues */
5310         switch (vsi->type) {
5311         case I40E_VSI_MAIN:
5312                 vsi->nb_qps = pf->lan_nb_qps;
5313                 break;
5314         case I40E_VSI_SRIOV:
5315                 vsi->nb_qps = pf->vf_nb_qps;
5316                 break;
5317         case I40E_VSI_VMDQ2:
5318                 vsi->nb_qps = pf->vmdq_nb_qps;
5319                 break;
5320         case I40E_VSI_FDIR:
5321                 vsi->nb_qps = pf->fdir_nb_qps;
5322                 break;
5323         default:
5324                 goto fail_mem;
5325         }
5326         /*
5327          * The filter status descriptor is reported on rx queue 0,
5328          * while the tx queue for fdir filter programming has no
5329          * such constraint and can be any queue.
5330          * To simplify things, the FDIR VSI uses queue pair 0.
5331          * To guarantee it gets queue pair 0, queue allocation must
5332          * be done before this function is called.
5333          */
5334         if (type != I40E_VSI_FDIR) {
5335                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5336                 if (ret < 0) {
5337                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5338                                     vsi->seid, ret);
5339                         goto fail_mem;
5340                 }
5341                 vsi->base_queue = ret;
5342         } else
5343                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5344
5345         /* VF has MSIX interrupt in VF range, don't allocate here */
5346         if (type == I40E_VSI_MAIN) {
5347                 if (pf->support_multi_driver) {
5348                         /* With multi-driver support, INT0 must be used
5349                          * instead of allocating from the MSI-X pool, which
5350                          * starts from INT1. It is therefore safe to set
5351                          * msix_intr to 0 and nb_msix to 1 directly.
5352                          */
5353                         vsi->msix_intr = 0;
5354                         vsi->nb_msix = 1;
5355                 } else {
5356                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5357                                                   RTE_MIN(vsi->nb_qps,
5358                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5359                         if (ret < 0) {
5360                                 PMD_DRV_LOG(ERR,
5361                                             "VSI MAIN %d get heap failed %d",
5362                                             vsi->seid, ret);
5363                                 goto fail_queue_alloc;
5364                         }
5365                         vsi->msix_intr = ret;
5366                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5367                                                RTE_MAX_RXTX_INTR_VEC_ID);
5368                 }
5369         } else if (type != I40E_VSI_SRIOV) {
5370                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5371                 if (ret < 0) {
5372                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5373                         goto fail_queue_alloc;
5374                 }
5375                 vsi->msix_intr = ret;
5376                 vsi->nb_msix = 1;
5377         } else {
5378                 vsi->msix_intr = 0;
5379                 vsi->nb_msix = 0;
5380         }
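
        /*
         * Vector assignment summary: the main VSI gets one vector per
         * queue pair (capped at RTE_MAX_RXTX_INTR_VEC_ID, or just INT0
         * in multi-driver mode), other PF-owned VSIs get a single
         * vector, and SRIOV VSIs get none here because VF vectors live
         * in the VF's own MSI-X space.
         */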
5381
5382         /* Add VSI */
5383         if (type == I40E_VSI_MAIN) {
5384                 /* For main VSI, no need to add since it's default one */
5385                 vsi->uplink_seid = pf->mac_seid;
5386                 vsi->seid = pf->main_vsi_seid;
5387                 /* Bind queues with specific MSIX interrupt */
5388                 /**
5389                  * At least two interrupts are needed: one for misc causes,
5390                  * enabled from the OS side, and another for binding queue
5391                  * interrupts from the device side only.
5392                  */
5393
5394                 /* Get default VSI parameters from hardware */
5395                 memset(&ctxt, 0, sizeof(ctxt));
5396                 ctxt.seid = vsi->seid;
5397                 ctxt.pf_num = hw->pf_id;
5398                 ctxt.uplink_seid = vsi->uplink_seid;
5399                 ctxt.vf_num = 0;
5400                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5401                 if (ret != I40E_SUCCESS) {
5402                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5403                         goto fail_msix_alloc;
5404                 }
5405                 rte_memcpy(&vsi->info, &ctxt.info,
5406                         sizeof(struct i40e_aqc_vsi_properties_data));
5407                 vsi->vsi_id = ctxt.vsi_number;
5408                 vsi->info.valid_sections = 0;
5409
5410                 /* Configure TCs; only TC0 is enabled */
5411                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5412                         I40E_SUCCESS) {
5413                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5414                         goto fail_msix_alloc;
5415                 }
5416
5417                 /* TC, queue mapping */
5418                 memset(&ctxt, 0, sizeof(ctxt));
5419                 vsi->info.valid_sections |=
5420                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5421                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5422                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5423                 rte_memcpy(&ctxt.info, &vsi->info,
5424                         sizeof(struct i40e_aqc_vsi_properties_data));
5425                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5426                                                 I40E_DEFAULT_TCMAP);
5427                 if (ret != I40E_SUCCESS) {
5428                         PMD_DRV_LOG(ERR,
5429                                 "Failed to configure TC queue mapping");
5430                         goto fail_msix_alloc;
5431                 }
5432                 ctxt.seid = vsi->seid;
5433                 ctxt.pf_num = hw->pf_id;
5434                 ctxt.uplink_seid = vsi->uplink_seid;
5435                 ctxt.vf_num = 0;
5436
5437                 /* Update VSI parameters */
5438                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5439                 if (ret != I40E_SUCCESS) {
5440                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5441                         goto fail_msix_alloc;
5442                 }
5443
5444                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5445                                                 sizeof(vsi->info.tc_mapping));
5446                 rte_memcpy(&vsi->info.queue_mapping,
5447                                 &ctxt.info.queue_mapping,
5448                         sizeof(vsi->info.queue_mapping));
5449                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5450                 vsi->info.valid_sections = 0;
5451
5452                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5453                                 ETH_ADDR_LEN);
5454
5455                 /**
5456                  * Updating the default filter settings is necessary to
5457                  * prevent reception of tagged packets.
5458                  * Some old firmware configurations load a default macvlan
5459                  * filter which accepts both tagged and untagged packets.
5460                  * The update replaces it with a normal filter if needed.
5461                  * For NVM 4.2.2 or later, the update is no longer needed.
5462                  * Firmware with correct configurations loads a default
5463                  * macvlan filter which is expected and cannot be removed.
5464                  */
5465                 i40e_update_default_filter_setting(vsi);
5466                 i40e_config_qinq(hw, vsi);
5467         } else if (type == I40E_VSI_SRIOV) {
5468                 memset(&ctxt, 0, sizeof(ctxt));
5469                 /**
5470                  * For other VSIs, the uplink_seid equals the uplink VSI's
5471                  * uplink_seid since they share the same VEB.
5472                  */
5473                 if (uplink_vsi == NULL)
5474                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5475                 else
5476                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5477                 ctxt.pf_num = hw->pf_id;
5478                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5479                 ctxt.uplink_seid = vsi->uplink_seid;
5480                 ctxt.connection_type = 0x1;
5481                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5482
5483                 /* Use the VEB configuration if FW >= v5.0 */
5484                 if (hw->aq.fw_maj_ver >= 5) {
5485                         /* Configure switch ID */
5486                         ctxt.info.valid_sections |=
5487                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5488                         ctxt.info.switch_id =
5489                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5490                 }
5491
5492                 /* Configure port/vlan */
5493                 ctxt.info.valid_sections |=
5494                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5495                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5496                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5497                                                 hw->func_caps.enabled_tcmap);
5498                 if (ret != I40E_SUCCESS) {
5499                         PMD_DRV_LOG(ERR,
5500                                 "Failed to configure TC queue mapping");
5501                         goto fail_msix_alloc;
5502                 }
5503
5504                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5505                 ctxt.info.valid_sections |=
5506                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5507                 /**
5508                  * Since the VSI is not created yet, only the parameters are
5509                  * configured here; the VSI is added below.
5510                  */
5511
5512                 i40e_config_qinq(hw, vsi);
5513         } else if (type == I40E_VSI_VMDQ2) {
5514                 memset(&ctxt, 0, sizeof(ctxt));
5515                 /*
5516                  * For other VSIs, the uplink_seid equals the uplink VSI's
5517                  * uplink_seid since they share the same VEB.
5518                  */
5519                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5520                 ctxt.pf_num = hw->pf_id;
5521                 ctxt.vf_num = 0;
5522                 ctxt.uplink_seid = vsi->uplink_seid;
5523                 ctxt.connection_type = 0x1;
5524                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5525
5526                 ctxt.info.valid_sections |=
5527                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5528                 /* user_param carries the flag to enable loopback */
5529                 if (user_param) {
5530                         ctxt.info.switch_id =
5531                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5532                         ctxt.info.switch_id |=
5533                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5534                 }
5535
5536                 /* Configure port/vlan */
5537                 ctxt.info.valid_sections |=
5538                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5539                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5540                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5541                                                 I40E_DEFAULT_TCMAP);
5542                 if (ret != I40E_SUCCESS) {
5543                         PMD_DRV_LOG(ERR,
5544                                 "Failed to configure TC queue mapping");
5545                         goto fail_msix_alloc;
5546                 }
5547                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5548                 ctxt.info.valid_sections |=
5549                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5550         } else if (type == I40E_VSI_FDIR) {
5551                 memset(&ctxt, 0, sizeof(ctxt));
5552                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5553                 ctxt.pf_num = hw->pf_id;
5554                 ctxt.vf_num = 0;
5555                 ctxt.uplink_seid = vsi->uplink_seid;
5556                 ctxt.connection_type = 0x1;     /* regular data port */
5557                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5558                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5559                                                 I40E_DEFAULT_TCMAP);
5560                 if (ret != I40E_SUCCESS) {
5561                         PMD_DRV_LOG(ERR,
5562                                 "Failed to configure TC queue mapping.");
5563                         goto fail_msix_alloc;
5564                 }
5565                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5566                 ctxt.info.valid_sections |=
5567                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5568         } else {
5569                 PMD_DRV_LOG(ERR, "VSI: Other VSI types are not supported yet");
5570                 goto fail_msix_alloc;
5571         }
5572
5573         if (vsi->type != I40E_VSI_MAIN) {
5574                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5575                 if (ret != I40E_SUCCESS) {
5576                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5577                                     hw->aq.asq_last_status);
5578                         goto fail_msix_alloc;
5579                 }
5580                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5581                 vsi->info.valid_sections = 0;
5582                 vsi->seid = ctxt.seid;
5583                 vsi->vsi_id = ctxt.vsi_number;
5584                 vsi->sib_vsi_list.vsi = vsi;
5585                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5586                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5587                                           &vsi->sib_vsi_list, list);
5588                 } else {
5589                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5590                                           &vsi->sib_vsi_list, list);
5591                 }
5592         }
5593
5594         /* MAC/VLAN configuration */
5595         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5596         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5597
5598         ret = i40e_vsi_add_mac(vsi, &filter);
5599         if (ret != I40E_SUCCESS) {
5600                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5601                 goto fail_msix_alloc;
5602         }
5603
5604         /* Get VSI BW information */
5605         i40e_vsi_get_bw_config(vsi);
5606         return vsi;
5607 fail_msix_alloc:
5608         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5609 fail_queue_alloc:
5610         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5611 fail_mem:
5612         rte_free(vsi);
5613         return NULL;
5614 }
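
/*
 * Minimal usage sketch for i40e_vsi_setup() (illustrative only):
 * creating a VMDq VSI under the main VSI, with user_param carrying
 * the loopback-enable flag:
 *
 *     struct i40e_vsi *vmdq_vsi =
 *             i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 1);
 *     if (vmdq_vsi == NULL)
 *             PMD_DRV_LOG(ERR, "VMDq VSI creation failed");
 *
 * i40e_vmdq_setup() below follows this pattern.
 */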
5615
5616 /* Configure vlan filter on or off */
5617 int
5618 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5619 {
5620         int i, num;
5621         struct i40e_mac_filter *f;
5622         void *temp;
5623         struct i40e_mac_filter_info *mac_filter;
5624         enum rte_mac_filter_type desired_filter;
5625         int ret = I40E_SUCCESS;
5626
5627         if (on) {
5628                 /* Filter to match MAC and VLAN */
5629                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5630         } else {
5631                 /* Filter to match only MAC */
5632                 desired_filter = RTE_MAC_PERFECT_MATCH;
5633         }
5634
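        /*
         * Toggling VLAN matching requires rewriting the whole MAC list:
         * snapshot every filter, delete it, then re-add it with the
         * desired filter type.
         */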
5635         num = vsi->mac_num;
5636
5637         mac_filter = rte_zmalloc("mac_filter_info_data",
5638                                  num * sizeof(*mac_filter), 0);
5639         if (mac_filter == NULL) {
5640                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5641                 return I40E_ERR_NO_MEMORY;
5642         }
5643
5644         i = 0;
5645
5646         /* Remove all existing MAC filters */
5647         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5648                 mac_filter[i] = f->mac_info;
5649                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5650                 if (ret) {
5651                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5652                                     on ? "enable" : "disable");
5653                         goto DONE;
5654                 }
5655                 i++;
5656         }
5657
5658         /* Re-add the filters with the desired filter type */
5659         for (i = 0; i < num; i++) {
5660                 mac_filter[i].filter_type = desired_filter;
5661                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5662                 if (ret) {
5663                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5664                                     on ? "enable" : "disable");
5665                         goto DONE;
5666                 }
5667         }
5668
5669 DONE:
5670         rte_free(mac_filter);
5671         return ret;
5672 }
5673
5674 /* Configure vlan stripping on or off */
5675 int
5676 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5677 {
5678         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5679         struct i40e_vsi_context ctxt;
5680         uint8_t vlan_flags;
5681         int ret = I40E_SUCCESS;
5682
5683         /* Check whether stripping is already in the requested state */
5684         if (vsi->info.valid_sections &
5685                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5686                 if (on) {
5687                         if ((vsi->info.port_vlan_flags &
5688                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5689                                 return 0; /* already on */
5690                 } else {
5691                         if ((vsi->info.port_vlan_flags &
5692                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5693                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5694                                 return 0; /* already off */
5695                 }
5696         }
5697
5698         if (on)
5699                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5700         else
5701                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5702         vsi->info.valid_sections =
5703                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5704         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5705         vsi->info.port_vlan_flags |= vlan_flags;
5706         ctxt.seid = vsi->seid;
5707         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5708         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5709         if (ret)
5710                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5711                             on ? "enable" : "disable");
5712
5713         return ret;
5714 }
5715
5716 static int
5717 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5718 {
5719         struct rte_eth_dev_data *data = dev->data;
5720         int ret;
5721         int mask = 0;
5722
5723         /* Apply vlan offload setting */
5724         mask = ETH_VLAN_STRIP_MASK |
5725                ETH_VLAN_FILTER_MASK |
5726                ETH_VLAN_EXTEND_MASK;
5727         ret = i40e_vlan_offload_set(dev, mask);
5728         if (ret) {
5729                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5730                 return ret;
5731         }
5732
5733         /* Apply pvid setting */
5734         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5735                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5736         if (ret)
5737                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5738
5739         return ret;
5740 }
5741
5742 static int
5743 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5744 {
5745         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5746
5747         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5748 }
5749
5750 static int
5751 i40e_update_flow_control(struct i40e_hw *hw)
5752 {
5753 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5754         struct i40e_link_status link_status;
5755         uint32_t rxfc = 0, txfc = 0, reg;
5756         uint8_t an_info;
5757         int ret;
5758
5759         memset(&link_status, 0, sizeof(link_status));
5760         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5761         if (ret != I40E_SUCCESS) {
5762                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5763                 goto write_reg; /* Disable flow control */
5764         }
5765
5766         an_info = hw->phy.link_info.an_info;
5767         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5768                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5769                 ret = I40E_ERR_NOT_READY;
5770                 goto write_reg; /* Disable flow control */
5771         }
5772         /**
5773          * If link auto negotiation is enabled, flow control needs to
5774          * be configured according to it
5775          */
5776         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5777         case I40E_LINK_PAUSE_RXTX:
5778                 rxfc = 1;
5779                 txfc = 1;
5780                 hw->fc.current_mode = I40E_FC_FULL;
5781                 break;
5782         case I40E_AQ_LINK_PAUSE_RX:
5783                 rxfc = 1;
5784                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5785                 break;
5786         case I40E_AQ_LINK_PAUSE_TX:
5787                 txfc = 1;
5788                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5789                 break;
5790         default:
5791                 hw->fc.current_mode = I40E_FC_NONE;
5792                 break;
5793         }
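
        /*
         * Example (hypothetical negotiation result): if the link partner
         * agreed to symmetric pause, an_info has both PAUSE bits set and
         * the I40E_LINK_PAUSE_RXTX case above selects full flow control.
         */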
5794
5795 write_reg:
5796         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5797                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5798         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5799         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5800         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5801         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5802
5803         return ret;
5804 }
5805
5806 /* PF setup */
5807 static int
5808 i40e_pf_setup(struct i40e_pf *pf)
5809 {
5810         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5811         struct i40e_filter_control_settings settings;
5812         struct i40e_vsi *vsi;
5813         int ret;
5814
5815         /* Clear all stats counters */
5816         pf->offset_loaded = FALSE;
5817         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5818         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5819         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5820         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5821
5822         ret = i40e_pf_get_switch_config(pf);
5823         if (ret != I40E_SUCCESS) {
5824                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5825                 return ret;
5826         }
5827
5828         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
5829         if (ret)
5830                 PMD_INIT_LOG(WARNING,
5831                         "failed to allocate switch domain, error %d", ret);
5832
5833         if (pf->flags & I40E_FLAG_FDIR) {
5834                 /* Allocate queues first so that FDIR uses queue pair 0 */
5835                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5836                 if (ret != I40E_FDIR_QUEUE_ID) {
5837                         PMD_DRV_LOG(ERR,
5838                                 "queue allocation fails for FDIR: ret =%d",
5839                                 ret);
5840                         pf->flags &= ~I40E_FLAG_FDIR;
5841                 }
5842         }
5843         /*  main VSI setup */
5844         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5845         if (!vsi) {
5846                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5847                 return I40E_ERR_NOT_READY;
5848         }
5849         pf->main_vsi = vsi;
5850
5851         /* Configure filter control */
5852         memset(&settings, 0, sizeof(settings));
5853         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5854                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5855         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5856                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5857         else {
5858                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5859                         hw->func_caps.rss_table_size);
5860                 return I40E_ERR_PARAM;
5861         }
5862         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5863                 hw->func_caps.rss_table_size);
5864         pf->hash_lut_size = hw->func_caps.rss_table_size;
5865
5866         /* Enable ethtype and macvlan filters */
5867         settings.enable_ethtype = TRUE;
5868         settings.enable_macvlan = TRUE;
5869         ret = i40e_set_filter_control(hw, &settings);
5870         if (ret)
5871                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5872                                                                 ret);
5873
5874         /* Update flow control according to the auto negotiation */
5875         i40e_update_flow_control(hw);
5876
5877         return I40E_SUCCESS;
5878 }
5879
5880 int
5881 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5882 {
5883         uint32_t reg;
5884         uint16_t j;
5885
5886         /**
5887          * Set or clear the TX Queue Disable flags,
5888          * as required by the hardware.
5889          */
5890         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5891         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5892
5893         /* Wait until the request is finished */
5894         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5895                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5896                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5897                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5898                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5899                                                         & 0x1))) {
5900                         break;
5901                 }
5902         }
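
        /*
         * Here the QENA_REQ and QENA_STAT bits normally agree (unless
         * the wait above timed out), meaning any previous enable/disable
         * request has completed and a new request can be programmed.
         */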
5903         if (on) {
5904                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5905                         return I40E_SUCCESS; /* already on, skip next steps */
5906
5907                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5908                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5909         } else {
5910                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5911                         return I40E_SUCCESS; /* already off, skip next steps */
5912                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5913         }
5914         /* Write the register */
5915         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5916         /* Check the result */
5917         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5918                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5919                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5920                 if (on) {
5921                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5922                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5923                                 break;
5924                 } else {
5925                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5926                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5927                                 break;
5928                 }
5929         }
5930         /* Check if it is timeout */
5931         if (j >= I40E_CHK_Q_ENA_COUNT) {
5932                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5933                             (on ? "enable" : "disable"), q_idx);
5934                 return I40E_ERR_TIMEOUT;
5935         }
5936
5937         return I40E_SUCCESS;
5938 }
5939
5940 /* Switch on or off the tx queues */
5941 static int
5942 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5943 {
5944         struct rte_eth_dev_data *dev_data = pf->dev_data;
5945         struct i40e_tx_queue *txq;
5946         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5947         uint16_t i;
5948         int ret;
5949
5950         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5951                 txq = dev_data->tx_queues[i];
5952                 /* Skip the queue if it is not configured, or if its
5953                  * start is deferred to per-queue control */
5954                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5955                         continue;
5956                 if (on)
5957                         ret = i40e_dev_tx_queue_start(dev, i);
5958                 else
5959                         ret = i40e_dev_tx_queue_stop(dev, i);
5960                 if (ret != I40E_SUCCESS)
5961                         return ret;
5962         }
5963
5964         return I40E_SUCCESS;
5965 }
5966
5967 int
5968 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5969 {
5970         uint32_t reg;
5971         uint16_t j;
5972
5973         /* Wait until the request is finished */
5974         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5975                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5976                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5977                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5978                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5979                         break;
5980         }
5981
5982         if (on) {
5983                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5984                         return I40E_SUCCESS; /* Already on, skip next steps */
5985                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5986         } else {
5987                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5988                         return I40E_SUCCESS; /* Already off, skip next steps */
5989                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5990         }
5991
5992         /* Write the register */
5993         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5994         /* Check the result */
5995         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5996                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5997                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5998                 if (on) {
5999                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6000                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6001                                 break;
6002                 } else {
6003                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6004                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6005                                 break;
6006                 }
6007         }
6008
6009         /* Check if it is timeout */
6010         if (j >= I40E_CHK_Q_ENA_COUNT) {
6011                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6012                             (on ? "enable" : "disable"), q_idx);
6013                 return I40E_ERR_TIMEOUT;
6014         }
6015
6016         return I40E_SUCCESS;
6017 }
6018 /* Switch on or off the rx queues */
6019 static int
6020 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
6021 {
6022         struct rte_eth_dev_data *dev_data = pf->dev_data;
6023         struct i40e_rx_queue *rxq;
6024         struct rte_eth_dev *dev = pf->adapter->eth_dev;
6025         uint16_t i;
6026         int ret;
6027
6028         for (i = 0; i < dev_data->nb_rx_queues; i++) {
6029                 rxq = dev_data->rx_queues[i];
6030                 /* Skip the queue if it is not configured, or if its
6031                  * start is deferred to per-queue control */
6032                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
6033                         continue;
6034                 if (on)
6035                         ret = i40e_dev_rx_queue_start(dev, i);
6036                 else
6037                         ret = i40e_dev_rx_queue_stop(dev, i);
6038                 if (ret != I40E_SUCCESS)
6039                         return ret;
6040         }
6041
6042         return I40E_SUCCESS;
6043 }
6044
6045 /* Switch on or off all the rx/tx queues */
6046 int
6047 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
6048 {
6049         int ret;
6050
6051         if (on) {
6052                 /* enable rx queues before enabling tx queues */
6053                 ret = i40e_dev_switch_rx_queues(pf, on);
6054                 if (ret) {
6055                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
6056                         return ret;
6057                 }
6058                 ret = i40e_dev_switch_tx_queues(pf, on);
6059         } else {
6060                 /* Stop tx queues before stopping rx queues */
6061                 ret = i40e_dev_switch_tx_queues(pf, on);
6062                 if (ret) {
6063                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
6064                         return ret;
6065                 }
6066                 ret = i40e_dev_switch_rx_queues(pf, on);
6067         }
6068
6069         return ret;
6070 }
6071
6072 /* Initialize VSI for TX */
6073 static int
6074 i40e_dev_tx_init(struct i40e_pf *pf)
6075 {
6076         struct rte_eth_dev_data *data = pf->dev_data;
6077         uint16_t i;
6078         int ret = I40E_SUCCESS;
6079         struct i40e_tx_queue *txq;
6080
6081         for (i = 0; i < data->nb_tx_queues; i++) {
6082                 txq = data->tx_queues[i];
6083                 if (!txq || !txq->q_set)
6084                         continue;
6085                 ret = i40e_tx_queue_init(txq);
6086                 if (ret != I40E_SUCCESS)
6087                         break;
6088         }
6089         if (ret == I40E_SUCCESS)
6090                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6091                                      ->eth_dev);
6092
6093         return ret;
6094 }
6095
6096 /* Initialize VSI for RX */
6097 static int
6098 i40e_dev_rx_init(struct i40e_pf *pf)
6099 {
6100         struct rte_eth_dev_data *data = pf->dev_data;
6101         int ret = I40E_SUCCESS;
6102         uint16_t i;
6103         struct i40e_rx_queue *rxq;
6104
6105         i40e_pf_config_mq_rx(pf);
6106         for (i = 0; i < data->nb_rx_queues; i++) {
6107                 rxq = data->rx_queues[i];
6108                 if (!rxq || !rxq->q_set)
6109                         continue;
6110
6111                 ret = i40e_rx_queue_init(rxq);
6112                 if (ret != I40E_SUCCESS) {
6113                         PMD_DRV_LOG(ERR,
6114                                 "Failed to do RX queue initialization");
6115                         break;
6116                 }
6117         }
6118         if (ret == I40E_SUCCESS)
6119                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6120                                      ->eth_dev);
6121
6122         return ret;
6123 }
6124
6125 static int
6126 i40e_dev_rxtx_init(struct i40e_pf *pf)
6127 {
6128         int err;
6129
6130         err = i40e_dev_tx_init(pf);
6131         if (err) {
6132                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6133                 return err;
6134         }
6135         err = i40e_dev_rx_init(pf);
6136         if (err) {
6137                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6138                 return err;
6139         }
6140
6141         return err;
6142 }
6143
6144 static int
6145 i40e_vmdq_setup(struct rte_eth_dev *dev)
6146 {
6147         struct rte_eth_conf *conf = &dev->data->dev_conf;
6148         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6149         int i, err, conf_vsis, j, loop;
6150         struct i40e_vsi *vsi;
6151         struct i40e_vmdq_info *vmdq_info;
6152         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6153         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6154
6155         /*
6156          * Disable interrupts to avoid messages from VFs and to avoid
6157          * race conditions in VSI creation/destruction.
6158          */
6159         i40e_pf_disable_irq0(hw);
6160
6161         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6162                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6163                 return -ENOTSUP;
6164         }
6165
6166         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6167         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6168                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6169                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6170                         pf->max_nb_vmdq_vsi);
6171                 return -ENOTSUP;
6172         }
6173
6174         if (pf->vmdq != NULL) {
6175                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6176                 return 0;
6177         }
6178
6179         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6180                                 sizeof(*vmdq_info) * conf_vsis, 0);
6181
6182         if (pf->vmdq == NULL) {
6183                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6184                 return -ENOMEM;
6185         }
6186
6187         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6188
6189         /* Create VMDQ VSI */
6190         for (i = 0; i < conf_vsis; i++) {
6191                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6192                                 vmdq_conf->enable_loop_back);
6193                 if (vsi == NULL) {
6194                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6195                         err = -1;
6196                         goto err_vsi_setup;
6197                 }
6198                 vmdq_info = &pf->vmdq[i];
6199                 vmdq_info->pf = pf;
6200                 vmdq_info->vsi = vsi;
6201         }
6202         pf->nb_cfg_vmdq_vsi = conf_vsis;
6203
6204         /* Configure VLANs */
6205         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
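        /*
         * Each pool_map entry maps one VLAN id onto a bitmask of pools;
         * e.g. (illustrative) pools == 0x5 adds that vlan_id to VMDq
         * VSIs 0 and 2 in the loop below.
         */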
6206         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6207                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6208                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6209                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6210                                         vmdq_conf->pool_map[i].vlan_id, j);
6211
6212                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6213                                                 vmdq_conf->pool_map[i].vlan_id);
6214                                 if (err) {
6215                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6216                                         err = -1;
6217                                         goto err_vsi_setup;
6218                                 }
6219                         }
6220                 }
6221         }
6222
6223         i40e_pf_enable_irq0(hw);
6224
6225         return 0;
6226
6227 err_vsi_setup:
6228         for (i = 0; i < conf_vsis; i++) {
6229                 if (pf->vmdq[i].vsi == NULL)
6230                         break;
6231                 i40e_vsi_release(pf->vmdq[i].vsi);
6232         }
6233
6234         rte_free(pf->vmdq);
6235         pf->vmdq = NULL;
6236         i40e_pf_enable_irq0(hw);
6237         return err;
6238 }
6239
6240 static void
6241 i40e_stat_update_32(struct i40e_hw *hw,
6242                    uint32_t reg,
6243                    bool offset_loaded,
6244                    uint64_t *offset,
6245                    uint64_t *stat)
6246 {
6247         uint64_t new_data;
6248
6249         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6250         if (!offset_loaded)
6251                 *offset = new_data;
6252
6253         if (new_data >= *offset)
6254                 *stat = (uint64_t)(new_data - *offset);
6255         else
6256                 *stat = (uint64_t)((new_data +
6257                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6258 }
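
/*
 * Wraparound example for the helpers above and below (hypothetical
 * values): with a saved offset of 0xFFFFFFF0 and a new 32-bit reading
 * of 0x00000010, the counter wrapped, so the reported stat is
 * (0x10 + 2^32) - 0xFFFFFFF0 = 0x20 (32 events). The 48-bit variant
 * applies the same logic modulo 2^48.
 */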
6259
6260 static void
6261 i40e_stat_update_48(struct i40e_hw *hw,
6262                    uint32_t hireg,
6263                    uint32_t loreg,
6264                    bool offset_loaded,
6265                    uint64_t *offset,
6266                    uint64_t *stat)
6267 {
6268         uint64_t new_data;
6269
6270         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6271         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6272                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6273
6274         if (!offset_loaded)
6275                 *offset = new_data;
6276
6277         if (new_data >= *offset)
6278                 *stat = new_data - *offset;
6279         else
6280                 *stat = (uint64_t)((new_data +
6281                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6282
6283         *stat &= I40E_48_BIT_MASK;
6284 }
6285
6286 /* Disable IRQ0 */
6287 void
6288 i40e_pf_disable_irq0(struct i40e_hw *hw)
6289 {
6290         /* Disable all interrupt types */
6291         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6292                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6293         I40E_WRITE_FLUSH(hw);
6294 }
6295
6296 /* Enable IRQ0 */
6297 void
6298 i40e_pf_enable_irq0(struct i40e_hw *hw)
6299 {
6300         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6301                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6302                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6303                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6304         I40E_WRITE_FLUSH(hw);
6305 }
6306
6307 static void
6308 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6309 {
6310         /* read pending request and disable first */
6311         i40e_pf_disable_irq0(hw);
6312         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6313         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6314                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6315
6316         if (no_queue)
6317                 /* Link no queues with irq0 */
6318                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6319                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6320 }
6321
6322 static void
6323 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6324 {
6325         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6326         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6327         int i;
6328         uint16_t abs_vf_id;
6329         uint32_t index, offset, val;
6330
6331         if (!pf->vfs)
6332                 return;
6333         /**
6334          * Try to find which VF triggered a reset; use the absolute VF id
6335          * for access since the register is a global one.
6336          */
6337         for (i = 0; i < pf->vf_num; i++) {
6338                 abs_vf_id = hw->func_caps.vf_base_id + i;
6339                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6340                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
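                /*
                 * E.g. (illustrative) abs_vf_id 40 lands in VFLRSTAT
                 * register index 1, bit 8.
                 */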
6341                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6342                 /* VFR event occurred */
6343                 if (val & (0x1 << offset)) {
6344                         int ret;
6345
6346                         /* Clear the event first */
6347                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6348                                                         (0x1 << offset));
6349                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6350                         /**
6351                          * Only notify that a VF reset event occurred;
6352                          * do not trigger another SW reset.
6353                          */
6354                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6355                         if (ret != I40E_SUCCESS)
6356                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6357                 }
6358         }
6359 }
6360
6361 static void
6362 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6363 {
6364         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6365         int i;
6366
6367         for (i = 0; i < pf->vf_num; i++)
6368                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6369 }
6370
6371 static void
6372 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6373 {
6374         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6375         struct i40e_arq_event_info info;
6376         uint16_t pending, opcode;
6377         int ret;
6378
6379         info.buf_len = I40E_AQ_BUF_SZ;
6380         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6381         if (!info.msg_buf) {
6382                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6383                 return;
6384         }
6385
6386         pending = 1;
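        /* i40e_clean_arq_element() reports in 'pending' how many events
         * remain queued; loop until the AdminQ receive ring is drained.
         */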
6387         while (pending) {
6388                 ret = i40e_clean_arq_element(hw, &info, &pending);
6389
6390                 if (ret != I40E_SUCCESS) {
6391                         PMD_DRV_LOG(INFO,
6392                                 "Failed to read msg from AdminQ, aq_err: %u",
6393                                 hw->aq.asq_last_status);
6394                         break;
6395                 }
6396                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6397
6398                 switch (opcode) {
6399                 case i40e_aqc_opc_send_msg_to_pf:
6400                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6401                         i40e_pf_host_handle_vf_msg(dev,
6402                                         rte_le_to_cpu_16(info.desc.retval),
6403                                         rte_le_to_cpu_32(info.desc.cookie_high),
6404                                         rte_le_to_cpu_32(info.desc.cookie_low),
6405                                         info.msg_buf,
6406                                         info.msg_len);
6407                         break;
6408                 case i40e_aqc_opc_get_link_status:
6409                         ret = i40e_dev_link_update(dev, 0);
6410                         if (!ret)
6411                                 _rte_eth_dev_callback_process(dev,
6412                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6413                         break;
6414                 default:
6415                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6416                                     opcode);
6417                         break;
6418                 }
6419         }
6420         rte_free(info.msg_buf);
6421 }
6422
6423 /**
6424  * Interrupt handler triggered by the NIC for handling
6425  * a specific interrupt.
6426  *
6427  * @param handle
6428  *  Pointer to interrupt handle.
6429  * @param param
6430  *  The address of parameter (struct rte_eth_dev *) registered before.
6431  *
6432  * @return
6433  *  void
6434  */
6435 static void
6436 i40e_dev_interrupt_handler(void *param)
6437 {
6438         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6439         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6440         uint32_t icr0;
6441
6442         /* Disable interrupt */
6443         i40e_pf_disable_irq0(hw);
6444
6445         /* read out interrupt causes */
6446         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6447
6448         /* No interrupt event indicated */
6449         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6450                 PMD_DRV_LOG(INFO, "No interrupt event");
6451                 goto done;
6452         }
6453         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6454                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6455         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6456                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6457         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6458                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6459         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6460                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6461         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6462                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6463         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6464                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6465         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6466                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6467
6468         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6469                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6470                 i40e_dev_handle_vfr_event(dev);
6471         }
6472         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6473                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6474                 i40e_dev_handle_aq_msg(dev);
6475         }
6476
6477 done:
6478         /* Enable interrupt */
6479         i40e_pf_enable_irq0(hw);
6480         rte_intr_enable(dev->intr_handle);
6481 }
6482
6483 int
6484 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6485                          struct i40e_macvlan_filter *filter,
6486                          int total)
6487 {
6488         int ele_num, ele_buff_size;
6489         int num, actual_num, i;
6490         uint16_t flags;
6491         int ret = I40E_SUCCESS;
6492         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6493         struct i40e_aqc_add_macvlan_element_data *req_list;
6494
6495         if (filter == NULL || total == 0)
6496                 return I40E_ERR_PARAM;
6497         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6498         ele_buff_size = hw->aq.asq_buf_size;
6499
6500         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6501         if (req_list == NULL) {
6502                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6503                 return I40E_ERR_NO_MEMORY;
6504         }
6505
6506         num = 0;
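        /*
         * Send the filters in AdminQ-buffer-sized batches; e.g.
         * (illustrative) if the buffer holds 128 elements, adding 300
         * filters issues three commands of 128, 128 and 44 elements.
         */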
6507         do {
6508                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6509                 memset(req_list, 0, ele_buff_size);
6510
6511                 for (i = 0; i < actual_num; i++) {
6512                         rte_memcpy(req_list[i].mac_addr,
6513                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6514                         req_list[i].vlan_tag =
6515                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6516
6517                         switch (filter[num + i].filter_type) {
6518                         case RTE_MAC_PERFECT_MATCH:
6519                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6520                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6521                                 break;
6522                         case RTE_MACVLAN_PERFECT_MATCH:
6523                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6524                                 break;
6525                         case RTE_MAC_HASH_MATCH:
6526                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6527                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6528                                 break;
6529                         case RTE_MACVLAN_HASH_MATCH:
6530                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6531                                 break;
6532                         default:
6533                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6534                                 ret = I40E_ERR_PARAM;
6535                                 goto DONE;
6536                         }
6537
6538                         req_list[i].queue_number = 0;
6539
6540                         req_list[i].flags = rte_cpu_to_le_16(flags);
6541                 }
6542
6543                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6544                                                 actual_num, NULL);
6545                 if (ret != I40E_SUCCESS) {
6546                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6547                         goto DONE;
6548                 }
6549                 num += actual_num;
6550         } while (num < total);
6551
6552 DONE:
6553         rte_free(req_list);
6554         return ret;
6555 }
6556
6557 int
6558 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6559                             struct i40e_macvlan_filter *filter,
6560                             int total)
6561 {
6562         int ele_num, ele_buff_size;
6563         int num, actual_num, i;
6564         uint16_t flags;
6565         int ret = I40E_SUCCESS;
6566         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6567         struct i40e_aqc_remove_macvlan_element_data *req_list;
6568
6569         if (filter == NULL || total == 0)
6570                 return I40E_ERR_PARAM;
6571
6572         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6573         ele_buff_size = hw->aq.asq_buf_size;
6574
6575         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6576         if (req_list == NULL) {
6577                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6578                 return I40E_ERR_NO_MEMORY;
6579         }
6580
6581         num = 0;
6582         do {
6583                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6584                 memset(req_list, 0, ele_buff_size);
6585
6586                 for (i = 0; i < actual_num; i++) {
6587                         rte_memcpy(req_list[i].mac_addr,
6588                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6589                         req_list[i].vlan_tag =
6590                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6591
6592                         switch (filter[num + i].filter_type) {
6593                         case RTE_MAC_PERFECT_MATCH:
6594                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6595                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6596                                 break;
6597                         case RTE_MACVLAN_PERFECT_MATCH:
6598                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6599                                 break;
6600                         case RTE_MAC_HASH_MATCH:
6601                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6602                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6603                                 break;
6604                         case RTE_MACVLAN_HASH_MATCH:
6605                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6606                                 break;
6607                         default:
6608                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6609                                 ret = I40E_ERR_PARAM;
6610                                 goto DONE;
6611                         }
6612                         req_list[i].flags = rte_cpu_to_le_16(flags);
6613                 }
6614
6615                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6616                                                 actual_num, NULL);
6617                 if (ret != I40E_SUCCESS) {
6618                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6619                         goto DONE;
6620                 }
6621                 num += actual_num;
6622         } while (num < total);
6623
6624 DONE:
6625         rte_free(req_list);
6626         return ret;
6627 }
6628
6629 /* Find out specific MAC filter */
6630 static struct i40e_mac_filter *
6631 i40e_find_mac_filter(struct i40e_vsi *vsi,
6632                          struct ether_addr *macaddr)
6633 {
6634         struct i40e_mac_filter *f;
6635
6636         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6637                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6638                         return f;
6639         }
6640
6641         return NULL;
6642 }
6643
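/*
 * The VSI keeps a shadow of the VLAN filter table (vfta) as a bitmap:
 * VLAN N lives in word N / 32 at bit N % 32, which is what the
 * I40E_VFTA_IDX()/I40E_VFTA_BIT() macros compute (the inverse mapping,
 * j * I40E_UINT32_BIT_SIZE + k, appears in i40e_find_all_vlan_for_mac()
 * below).  For example, VLAN 100 maps to vfta[3], bit 4.
 */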
6644 static bool
6645 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6646                          uint16_t vlan_id)
6647 {
6648         uint32_t vid_idx, vid_bit;
6649
6650         if (vlan_id > ETH_VLAN_ID_MAX)
6651                 return 0;
6652
6653         vid_idx = I40E_VFTA_IDX(vlan_id);
6654         vid_bit = I40E_VFTA_BIT(vlan_id);
6655
6656         if (vsi->vfta[vid_idx] & vid_bit)
6657                 return 1;
6658         else
6659                 return 0;
6660 }
6661
6662 static void
6663 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6664                        uint16_t vlan_id, bool on)
6665 {
6666         uint32_t vid_idx, vid_bit;
6667
6668         vid_idx = I40E_VFTA_IDX(vlan_id);
6669         vid_bit = I40E_VFTA_BIT(vlan_id);
6670
6671         if (on)
6672                 vsi->vfta[vid_idx] |= vid_bit;
6673         else
6674                 vsi->vfta[vid_idx] &= ~vid_bit;
6675 }
6676
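/*
 * Update the SW shadow and, when required, the HW VLAN filter.  The AQ
 * add/remove is only issued when VLAN anti-spoofing or VLAN filtering
 * is active on the VSI and the VLAN id is non-zero; otherwise updating
 * the vfta bitmap is enough for the MAC/VLAN bookkeeping done by the
 * helpers above and below.
 */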
6677 void
6678 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6679                      uint16_t vlan_id, bool on)
6680 {
6681         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6682         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6683         int ret;
6684
6685         if (vlan_id > ETH_VLAN_ID_MAX)
6686                 return;
6687
6688         i40e_store_vlan_filter(vsi, vlan_id, on);
6689
6690         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6691                 return;
6692
6693         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6694
6695         if (on) {
6696                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6697                                        &vlan_data, 1, NULL);
6698                 if (ret != I40E_SUCCESS)
6699                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6700         } else {
6701                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6702                                           &vlan_data, 1, NULL);
6703                 if (ret != I40E_SUCCESS)
6704                         PMD_DRV_LOG(ERR,
6705                                     "Failed to remove vlan filter");
6706         }
6707 }
6708
6709 /**
6710  * Find all vlan options for specific mac addr,
6711  * return with actual vlan found.
6712  */
6713 int
6714 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6715                            struct i40e_macvlan_filter *mv_f,
6716                            int num, struct ether_addr *addr)
6717 {
6718         int i;
6719         uint32_t j, k;
6720
6721         /**
6722          * Avoid i40e_find_vlan_filter() here to reduce the loop time,
6723          * although the code looks more complex.
6724          */
6725         if (num < vsi->vlan_num)
6726                 return I40E_ERR_PARAM;
6727
6728         i = 0;
6729         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6730                 if (vsi->vfta[j]) {
6731                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6732                                 if (vsi->vfta[j] & (1 << k)) {
6733                                         if (i > num - 1) {
6734                                                 PMD_DRV_LOG(ERR,
6735                                                         "vlan number doesn't match");
6736                                                 return I40E_ERR_PARAM;
6737                                         }
6738                                         rte_memcpy(&mv_f[i].macaddr,
6739                                                         addr, ETH_ADDR_LEN);
6740                                         mv_f[i].vlan_id =
6741                                                 j * I40E_UINT32_BIT_SIZE + k;
6742                                         i++;
6743                                 }
6744                         }
6745                 }
6746         }
6747         return I40E_SUCCESS;
6748 }
6749
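/*
 * Example: with VLANs 10 and 20 set in vfta, the routine above fills
 * mv_f[0] = {addr, 10} and mv_f[1] = {addr, 20}, and the caller stamps
 * the filter_type on each entry afterwards.  The helper below is the
 * dual operation: one VLAN expanded across every MAC on the VSI.
 */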
6750 static inline int
6751 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6752                            struct i40e_macvlan_filter *mv_f,
6753                            int num,
6754                            uint16_t vlan)
6755 {
6756         int i = 0;
6757         struct i40e_mac_filter *f;
6758
6759         if (num < vsi->mac_num)
6760                 return I40E_ERR_PARAM;
6761
6762         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6763                 if (i > num - 1) {
6764                         PMD_DRV_LOG(ERR, "buffer number does not match");
6765                         return I40E_ERR_PARAM;
6766                 }
6767                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6768                                 ETH_ADDR_LEN);
6769                 mv_f[i].vlan_id = vlan;
6770                 mv_f[i].filter_type = f->mac_info.filter_type;
6771                 i++;
6772         }
6773
6774         return I40E_SUCCESS;
6775 }
6776
6777 static int
6778 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6779 {
6780         int i, j, num;
6781         struct i40e_mac_filter *f;
6782         struct i40e_macvlan_filter *mv_f;
6783         int ret = I40E_SUCCESS;
6784
6785         if (vsi == NULL || vsi->mac_num == 0)
6786                 return I40E_ERR_PARAM;
6787
6788         /* Case where no vlan is set */
6789         if (vsi->vlan_num == 0)
6790                 num = vsi->mac_num;
6791         else
6792                 num = vsi->mac_num * vsi->vlan_num;
6793
6794         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6795         if (mv_f == NULL) {
6796                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6797                 return I40E_ERR_NO_MEMORY;
6798         }
6799
6800         i = 0;
6801         if (vsi->vlan_num == 0) {
6802                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6803                         rte_memcpy(&mv_f[i].macaddr,
6804                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6805                         mv_f[i].filter_type = f->mac_info.filter_type;
6806                         mv_f[i].vlan_id = 0;
6807                         i++;
6808                 }
6809         } else {
6810                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6811                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6812                                         vsi->vlan_num, &f->mac_info.mac_addr);
6813                         if (ret != I40E_SUCCESS)
6814                                 goto DONE;
6815                         for (j = i; j < i + vsi->vlan_num; j++)
6816                                 mv_f[j].filter_type = f->mac_info.filter_type;
6817                         i += vsi->vlan_num;
6818                 }
6819         }
6820
6821         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6822 DONE:
6823         rte_free(mv_f);
6824
6825         return ret;
6826 }
6827
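/*
 * Adding a VLAN creates one (MAC, VLAN) pair per MAC address already
 * on the VSI, so the scratch array below is sized by mac_num.  The
 * delete path is symmetric, but when the last VLAN goes away it falls
 * back to VLAN 0 filters so that untagged traffic keeps flowing.
 */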
6828 int
6829 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6830 {
6831         struct i40e_macvlan_filter *mv_f;
6832         int mac_num;
6833         int ret = I40E_SUCCESS;
6834
6835         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6836                 return I40E_ERR_PARAM;
6837
6838         /* If it's already set, just return */
6839         if (i40e_find_vlan_filter(vsi, vlan))
6840                 return I40E_SUCCESS;
6841
6842         mac_num = vsi->mac_num;
6843
6844         if (mac_num == 0) {
6845                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6846                 return I40E_ERR_PARAM;
6847         }
6848
6849         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6850
6851         if (mv_f == NULL) {
6852                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6853                 return I40E_ERR_NO_MEMORY;
6854         }
6855
6856         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6857
6858         if (ret != I40E_SUCCESS)
6859                 goto DONE;
6860
6861         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6862
6863         if (ret != I40E_SUCCESS)
6864                 goto DONE;
6865
6866         i40e_set_vlan_filter(vsi, vlan, 1);
6867
6868         vsi->vlan_num++;
6869         ret = I40E_SUCCESS;
6870 DONE:
6871         rte_free(mv_f);
6872         return ret;
6873 }
6874
6875 int
6876 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6877 {
6878         struct i40e_macvlan_filter *mv_f;
6879         int mac_num;
6880         int ret = I40E_SUCCESS;
6881
6882         /**
6883          * Vlan 0 is the generic filter for untagged packets
6884          * and can't be removed.
6885          */
6886         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6887                 return I40E_ERR_PARAM;
6888
6889         /* If it can't be found, just return */
6890         if (!i40e_find_vlan_filter(vsi, vlan))
6891                 return I40E_ERR_PARAM;
6892
6893         mac_num = vsi->mac_num;
6894
6895         if (mac_num == 0) {
6896                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6897                 return I40E_ERR_PARAM;
6898         }
6899
6900         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6901
6902         if (mv_f == NULL) {
6903                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6904                 return I40E_ERR_NO_MEMORY;
6905         }
6906
6907         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6908
6909         if (ret != I40E_SUCCESS)
6910                 goto DONE;
6911
6912         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6913
6914         if (ret != I40E_SUCCESS)
6915                 goto DONE;
6916
6917         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6918         if (vsi->vlan_num == 1) {
6919                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6920                 if (ret != I40E_SUCCESS)
6921                         goto DONE;
6922
6923                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6924                 if (ret != I40E_SUCCESS)
6925                         goto DONE;
6926         }
6927
6928         i40e_set_vlan_filter(vsi, vlan, 0);
6929
6930         vsi->vlan_num--;
6931         ret = I40E_SUCCESS;
6932 DONE:
6933         rte_free(mv_f);
6934         return ret;
6935 }
6936
6937 int
6938 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6939 {
6940         struct i40e_mac_filter *f;
6941         struct i40e_macvlan_filter *mv_f;
6942         int i, vlan_num = 0;
6943         int ret = I40E_SUCCESS;
6944
6945         /* If the requested MAC filter already exists, just return */
6946         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6947         if (f != NULL)
6948                 return I40E_SUCCESS;
6949         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6950                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6951
6952                 /**
6953                  * If vlan_num is 0, that's the first time to add mac,
6954                  * set mask for vlan_id 0.
6955                  */
6956                 if (vsi->vlan_num == 0) {
6957                         i40e_set_vlan_filter(vsi, 0, 1);
6958                         vsi->vlan_num = 1;
6959                 }
6960                 vlan_num = vsi->vlan_num;
6961         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6962                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6963                 vlan_num = 1;
6964
6965         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6966         if (mv_f == NULL) {
6967                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6968                 return I40E_ERR_NO_MEMORY;
6969         }
6970
6971         for (i = 0; i < vlan_num; i++) {
6972                 mv_f[i].filter_type = mac_filter->filter_type;
6973                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6974                                 ETH_ADDR_LEN);
6975         }
6976
6977         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6978                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6979                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6980                                         &mac_filter->mac_addr);
6981                 if (ret != I40E_SUCCESS)
6982                         goto DONE;
6983         }
6984
6985         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6986         if (ret != I40E_SUCCESS)
6987                 goto DONE;
6988
6989         /* Add the mac addr into mac list */
6990         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6991         if (f == NULL) {
6992                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6993                 ret = I40E_ERR_NO_MEMORY;
6994                 goto DONE;
6995         }
6996         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6997                         ETH_ADDR_LEN);
6998         f->mac_info.filter_type = mac_filter->filter_type;
6999         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7000         vsi->mac_num++;
7001
7002         ret = I40E_SUCCESS;
7003 DONE:
7004         rte_free(mv_f);
7005
7006         return ret;
7007 }
7008
7009 int
7010 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
7011 {
7012         struct i40e_mac_filter *f;
7013         struct i40e_macvlan_filter *mv_f;
7014         int i, vlan_num;
7015         enum rte_mac_filter_type filter_type;
7016         int ret = I40E_SUCCESS;
7017
7018         /* If it can't be found, return an error */
7019         f = i40e_find_mac_filter(vsi, addr);
7020         if (f == NULL)
7021                 return I40E_ERR_PARAM;
7022
7023         vlan_num = vsi->vlan_num;
7024         filter_type = f->mac_info.filter_type;
7025         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7026                 filter_type == RTE_MACVLAN_HASH_MATCH) {
7027                 if (vlan_num == 0) {
7028                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7029                         return I40E_ERR_PARAM;
7030                 }
7031         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7032                         filter_type == RTE_MAC_HASH_MATCH)
7033                 vlan_num = 1;
7034
7035         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7036         if (mv_f == NULL) {
7037                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7038                 return I40E_ERR_NO_MEMORY;
7039         }
7040
7041         for (i = 0; i < vlan_num; i++) {
7042                 mv_f[i].filter_type = filter_type;
7043                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7044                                 ETH_ADDR_LEN);
7045         }
7046         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7047                         filter_type == RTE_MACVLAN_HASH_MATCH) {
7048                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7049                 if (ret != I40E_SUCCESS)
7050                         goto DONE;
7051         }
7052
7053         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7054         if (ret != I40E_SUCCESS)
7055                 goto DONE;
7056
7057         /* Remove the mac addr from the mac list */
7058         TAILQ_REMOVE(&vsi->mac_list, f, next);
7059         rte_free(f);
7060         vsi->mac_num--;
7061
7062         ret = I40E_SUCCESS;
7063 DONE:
7064         rte_free(mv_f);
7065         return ret;
7066 }
7067
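/*
 * HENA handling: rte_eth flow types are portable, while the hardware
 * hashes per packet classification type (PCTYPE).  pctypes_tbl[]
 * translates one flow type into a mask of PCTYPE bits, e.g.
 * RTE_ETH_FLOW_NONFRAG_IPV4_UDP expands to the IPv4/UDP PCTYPE(s)
 * supported by the device.  i40e_config_hena() ORs these masks together
 * for the PFQF_HENA registers and i40e_parse_hena() is the inverse.
 */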
7068 /* Configure hash enable flags for RSS */
7069 uint64_t
7070 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7071 {
7072         uint64_t hena = 0;
7073         int i;
7074
7075         if (!flags)
7076                 return hena;
7077
7078         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7079                 if (flags & (1ULL << i))
7080                         hena |= adapter->pctypes_tbl[i];
7081         }
7082
7083         return hena;
7084 }
7085
7086 /* Parse the hash enable flags */
7087 uint64_t
7088 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7089 {
7090         uint64_t rss_hf = 0;
7091         int i;
7092
7093         if (!flags)
7094                 return rss_hf;
7095
7096         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7097                 if (flags & adapter->pctypes_tbl[i])
7098                         rss_hf |= (1ULL << i);
7099         }
7100         return rss_hf;
7101 }
7102
7103 /* Disable RSS */
7104 static void
7105 i40e_pf_disable_rss(struct i40e_pf *pf)
7106 {
7107         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7108
7109         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7110         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7111         I40E_WRITE_FLUSH(hw);
7112 }
7113
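/*
 * Depending on the device, the RSS key is programmed either through the
 * admin queue (I40E_FLAG_RSS_AQ_CAPABLE, e.g. on X722) or by writing
 * the PFQF/VFQF HKEY registers directly.
 */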
7114 int
7115 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7116 {
7117         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7118         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7119         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7120                            I40E_VFQF_HKEY_MAX_INDEX :
7121                            I40E_PFQF_HKEY_MAX_INDEX;
7122         int ret = 0;
7123
7124         if (!key || key_len == 0) {
7125                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7126                 return 0;
7127         } else if (key_len != (key_idx + 1) *
7128                 sizeof(uint32_t)) {
7129                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7130                 return -EINVAL;
7131         }
7132
7133         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7134                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7135                         (struct i40e_aqc_get_set_rss_key_data *)key;
7136
7137                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7138                 if (ret)
7139                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7140         } else {
7141                 uint32_t *hash_key = (uint32_t *)key;
7142                 uint16_t i;
7143
7144                 if (vsi->type == I40E_VSI_SRIOV) {
7145                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7146                                 I40E_WRITE_REG(
7147                                         hw,
7148                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7149                                         hash_key[i]);
7150
7151                 } else {
7152                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7153                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7154                                                hash_key[i]);
7155                 }
7156                 I40E_WRITE_FLUSH(hw);
7157         }
7158
7159         return ret;
7160 }
7161
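/*
 * The key accepted above must be exactly (key_idx + 1) * 4 bytes long;
 * with the usual HKEY_MAX_INDEX of 12 that is 52 bytes for both the PF
 * and the VF register files.  Any other non-zero length is rejected
 * with -EINVAL.
 */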
7162 static int
7163 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7164 {
7165         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7166         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7167         uint32_t reg;
7168         int ret;
7169
7170         if (!key || !key_len)
7171                 return -EINVAL;
7172
7173         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7174                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7175                         (struct i40e_aqc_get_set_rss_key_data *)key);
7176                 if (ret) {
7177                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7178                         return ret;
7179                 }
7180         } else {
7181                 uint32_t *key_dw = (uint32_t *)key;
7182                 uint16_t i;
7183
7184                 if (vsi->type == I40E_VSI_SRIOV) {
7185                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7186                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7187                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7188                         }
7189                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7190                                    sizeof(uint32_t);
7191                 } else {
7192                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7193                                 reg = I40E_PFQF_HKEY(i);
7194                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7195                         }
7196                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7197                                    sizeof(uint32_t);
7198                 }
7199         }
7200         return 0;
7201 }
7202
7203 static int
7204 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7205 {
7206         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7207         uint64_t hena;
7208         int ret;
7209
7210         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7211                                rss_conf->rss_key_len);
7212         if (ret)
7213                 return ret;
7214
7215         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7216         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7217         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7218         I40E_WRITE_FLUSH(hw);
7219
7220         return 0;
7221 }
7222
7223 static int
7224 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7225                          struct rte_eth_rss_conf *rss_conf)
7226 {
7227         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7228         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7229         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7230         uint64_t hena;
7231
7232         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7233         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7234
7235         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7236                 if (rss_hf != 0) /* Enable RSS */
7237                         return -EINVAL;
7238                 return 0; /* Nothing to do */
7239         }
7240         /* RSS enabled */
7241         if (rss_hf == 0) /* Disable RSS */
7242                 return -EINVAL;
7243
7244         return i40e_hw_rss_hash_set(pf, rss_conf);
7245 }
7246
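/*
 * Note that the update callback above refuses to toggle RSS on or off:
 * a non-zero rss_hf is rejected while HENA shows RSS disabled, and a
 * zero rss_hf is rejected while it is enabled.  Whether RSS is on at
 * all is decided at device configuration time; this path only selects
 * which flow types are hashed.
 */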
7247 static int
7248 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7249                            struct rte_eth_rss_conf *rss_conf)
7250 {
7251         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7252         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7253         uint64_t hena;
7254
7255         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7256                          &rss_conf->rss_key_len);
7257
7258         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7259         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7260         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7261
7262         return 0;
7263 }
7264
7265 static int
7266 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7267 {
7268         switch (filter_type) {
7269         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7270                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7271                 break;
7272         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7273                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7274                 break;
7275         case RTE_TUNNEL_FILTER_IMAC_TENID:
7276                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7277                 break;
7278         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7279                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7280                 break;
7281         case ETH_TUNNEL_FILTER_IMAC:
7282                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7283                 break;
7284         case ETH_TUNNEL_FILTER_OIP:
7285                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7286                 break;
7287         case ETH_TUNNEL_FILTER_IIP:
7288                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7289                 break;
7290         default:
7291                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7292                 return -EINVAL;
7293         }
7294
7295         return 0;
7296 }
7297
7298 /* Convert tunnel filter structure */
7299 static int
7300 i40e_tunnel_filter_convert(
7301         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7302         struct i40e_tunnel_filter *tunnel_filter)
7303 {
7304         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7305                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
7306         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7307                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
7308         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7309         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7310              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7311             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7312                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7313         else
7314                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7315         tunnel_filter->input.flags = cld_filter->element.flags;
7316         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7317         tunnel_filter->queue = cld_filter->element.queue_number;
7318         rte_memcpy(tunnel_filter->input.general_fields,
7319                    cld_filter->general_fields,
7320                    sizeof(cld_filter->general_fields));
7321
7322         return 0;
7323 }
7324
7325 /* Check if the tunnel filter exists */
7326 struct i40e_tunnel_filter *
7327 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7328                              const struct i40e_tunnel_filter_input *input)
7329 {
7330         int ret;
7331
7332         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7333         if (ret < 0)
7334                 return NULL;
7335
7336         return tunnel_rule->hash_map[ret];
7337 }
7338
7339 /* Add a tunnel filter into the SW list */
7340 static int
7341 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7342                              struct i40e_tunnel_filter *tunnel_filter)
7343 {
7344         struct i40e_tunnel_rule *rule = &pf->tunnel;
7345         int ret;
7346
7347         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7348         if (ret < 0) {
7349                 PMD_DRV_LOG(ERR,
7350                             "Failed to insert tunnel filter into hash table %d!",
7351                             ret);
7352                 return ret;
7353         }
7354         rule->hash_map[ret] = tunnel_filter;
7355
7356         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7357
7358         return 0;
7359 }
7360
7361 /* Delete a tunnel filter from the SW list */
7362 int
7363 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7364                           struct i40e_tunnel_filter_input *input)
7365 {
7366         struct i40e_tunnel_rule *rule = &pf->tunnel;
7367         struct i40e_tunnel_filter *tunnel_filter;
7368         int ret;
7369
7370         ret = rte_hash_del_key(rule->hash_table, input);
7371         if (ret < 0) {
7372                 PMD_DRV_LOG(ERR,
7373                             "Failed to delete tunnel filter from hash table %d!",
7374                             ret);
7375                 return ret;
7376         }
7377         tunnel_filter = rule->hash_map[ret];
7378         rule->hash_map[ret] = NULL;
7379
7380         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7381         rte_free(tunnel_filter);
7382
7383         return 0;
7384 }
7385
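/*
 * The SW tunnel rule store pairs an rte_hash with a hash_map array:
 * rte_hash_add_key()/rte_hash_del_key() return the position of the key
 * in the table, and that position indexes hash_map[] to reach the
 * filter, so a lookup costs one hash probe plus one array dereference.
 */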
7386 int
7387 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7388                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7389                         uint8_t add)
7390 {
7391         uint16_t ip_type;
7392         uint32_t ipv4_addr, ipv4_addr_le;
7393         uint8_t i, tun_type = 0;
7394         /* internal variable to convert ipv6 byte order */
7395         uint32_t convert_ipv6[4];
7396         int val, ret = 0;
7397         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7398         struct i40e_vsi *vsi = pf->main_vsi;
7399         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7400         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7401         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7402         struct i40e_tunnel_filter *tunnel, *node;
7403         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7404
7405         cld_filter = rte_zmalloc("tunnel_filter",
7406                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7407                          0);
7408
7409         if (cld_filter == NULL) {
7410                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7411                 return -ENOMEM;
7412         }
7413         pfilter = cld_filter;
7414
7415         ether_addr_copy(&tunnel_filter->outer_mac,
7416                         (struct ether_addr *)&pfilter->element.outer_mac);
7417         ether_addr_copy(&tunnel_filter->inner_mac,
7418                         (struct ether_addr *)&pfilter->element.inner_mac);
7419
7420         pfilter->element.inner_vlan =
7421                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7422         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7423                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7424                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7425                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7426                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7427                                 &ipv4_addr_le,
7428                                 sizeof(pfilter->element.ipaddr.v4.data));
7429         } else {
7430                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7431                 for (i = 0; i < 4; i++) {
7432                         convert_ipv6[i] =
7433                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7434                 }
7435                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7436                            &convert_ipv6,
7437                            sizeof(pfilter->element.ipaddr.v6.data));
7438         }
7439
7440         /* check tunnel type */
7441         switch (tunnel_filter->tunnel_type) {
7442         case RTE_TUNNEL_TYPE_VXLAN:
7443                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7444                 break;
7445         case RTE_TUNNEL_TYPE_NVGRE:
7446                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7447                 break;
7448         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7449                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7450                 break;
7451         default:
7452                 /* Other tunnel types are not supported. */
7453                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7454                 rte_free(cld_filter);
7455                 return -EINVAL;
7456         }
7457
7458         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7459                                        &pfilter->element.flags);
7460         if (val < 0) {
7461                 rte_free(cld_filter);
7462                 return -EINVAL;
7463         }
7464
7465         pfilter->element.flags |= rte_cpu_to_le_16(
7466                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7467                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7468         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7469         pfilter->element.queue_number =
7470                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7471
7472         /* Check if the filter exists in the SW list */
7473         memset(&check_filter, 0, sizeof(check_filter));
7474         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7475         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7476         if (add && node) {
7477                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7478                 rte_free(cld_filter);
7479                 return -EINVAL;
7480         }
7481
7482         if (!add && !node) {
7483                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7484                 rte_free(cld_filter);
7485                 return -EINVAL;
7486         }
7487
7488         if (add) {
7489                 ret = i40e_aq_add_cloud_filters(hw,
7490                                         vsi->seid, &cld_filter->element, 1);
7491                 if (ret < 0) {
7492                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7493                         rte_free(cld_filter);
7494                         return -ENOTSUP;
7495                 }
7496                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7497                 if (tunnel == NULL) {
7498                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7499                         rte_free(cld_filter);
7500                         return -ENOMEM;
7501                 }
7502
7503                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7504                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7505                 if (ret < 0)
7506                         rte_free(tunnel);
7507         } else {
7508                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7509                                                    &cld_filter->element, 1);
7510                 if (ret < 0) {
7511                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7512                         rte_free(cld_filter);
7513                         return -ENOTSUP;
7514                 }
7515                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7516         }
7517
7518         rte_free(cld_filter);
7519         return ret;
7520 }
7521
7522 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7523 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7524 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7525 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7526 #define I40E_TR_GRE_KEY_MASK                    0x400
7527 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7528 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7529
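/*
 * MPLS (and, below, GTP) is not a native cloud filter type, so the
 * driver repurposes spare filter types (0x11/0x12/0x13) with the
 * "replace cloud filters" AQ command: an L1 filter is retargeted at the
 * tunnel key words and a cloud filter is chained on top of it.  The
 * replacement is a device-global change, which is why it is refused
 * when multiple drivers share the NIC and why a global configuration
 * warning is raised on success.
 */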
7530 static enum i40e_status_code
7531 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7532 {
7533         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7534         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7535         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7536         enum i40e_status_code status = I40E_SUCCESS;
7537
7538         if (pf->support_multi_driver) {
7539                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7540                 return I40E_NOT_SUPPORTED;
7541         }
7542
7543         memset(&filter_replace, 0,
7544                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7545         memset(&filter_replace_buf, 0,
7546                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7547
7548         /* create L1 filter */
7549         filter_replace.old_filter_type =
7550                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7551         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7552         filter_replace.tr_bit = 0;
7553
7554         /* Prepare the buffer, 3 entries */
7555         filter_replace_buf.data[0] =
7556                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7557         filter_replace_buf.data[0] |=
7558                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7559         filter_replace_buf.data[2] = 0xFF;
7560         filter_replace_buf.data[3] = 0xFF;
7561         filter_replace_buf.data[4] =
7562                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7563         filter_replace_buf.data[4] |=
7564                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7565         filter_replace_buf.data[7] = 0xF0;
7566         filter_replace_buf.data[8]
7567                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7568         filter_replace_buf.data[8] |=
7569                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7570         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7571                 I40E_TR_GENEVE_KEY_MASK |
7572                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7573         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7574                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7575                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7576
7577         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7578                                                &filter_replace_buf);
7579         if (!status) {
7580                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7581                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7582                             "cloud l1 type is changed from 0x%x to 0x%x",
7583                             filter_replace.old_filter_type,
7584                             filter_replace.new_filter_type);
7585         }
7586         return status;
7587 }
7588
7589 static enum i40e_status_code
7590 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7591 {
7592         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7593         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7594         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7595         enum i40e_status_code status = I40E_SUCCESS;
7596
7597         if (pf->support_multi_driver) {
7598                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7599                 return I40E_NOT_SUPPORTED;
7600         }
7601
7602         /* For MPLSoUDP */
7603         memset(&filter_replace, 0,
7604                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7605         memset(&filter_replace_buf, 0,
7606                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7607         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7608                 I40E_AQC_MIRROR_CLOUD_FILTER;
7609         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7610         filter_replace.new_filter_type =
7611                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7612         /* Prepare the buffer, 2 entries */
7613         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7614         filter_replace_buf.data[0] |=
7615                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7616         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7617         filter_replace_buf.data[4] |=
7618                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7619         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7620                                                &filter_replace_buf);
7621         if (status < 0)
7622                 return status;
7623         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7624                     "cloud filter type is changed from 0x%x to 0x%x",
7625                     filter_replace.old_filter_type,
7626                     filter_replace.new_filter_type);
7627
7628         /* For MPLSoGRE */
7629         memset(&filter_replace, 0,
7630                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7631         memset(&filter_replace_buf, 0,
7632                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7633
7634         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7635                 I40E_AQC_MIRROR_CLOUD_FILTER;
7636         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7637         filter_replace.new_filter_type =
7638                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7639         /* Prepare the buffer, 2 entries */
7640         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7641         filter_replace_buf.data[0] |=
7642                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7643         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7644         filter_replace_buf.data[4] |=
7645                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7646
7647         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7648                                                &filter_replace_buf);
7649         if (!status) {
7650                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7651                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7652                             "cloud filter type is changed from 0x%x to 0x%x",
7653                             filter_replace.old_filter_type,
7654                             filter_replace.new_filter_type);
7655         }
7656         return status;
7657 }
7658
7659 static enum i40e_status_code
7660 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7661 {
7662         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7663         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7664         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7665         enum i40e_status_code status = I40E_SUCCESS;
7666
7667         if (pf->support_multi_driver) {
7668                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7669                 return I40E_NOT_SUPPORTED;
7670         }
7671
7672         /* For GTP-C */
7673         memset(&filter_replace, 0,
7674                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7675         memset(&filter_replace_buf, 0,
7676                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7677         /* create L1 filter */
7678         filter_replace.old_filter_type =
7679                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7680         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7681         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7682                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7683         /* Prepare the buffer, 2 entries */
7684         filter_replace_buf.data[0] =
7685                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7686         filter_replace_buf.data[0] |=
7687                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7688         filter_replace_buf.data[2] = 0xFF;
7689         filter_replace_buf.data[3] = 0xFF;
7690         filter_replace_buf.data[4] =
7691                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7692         filter_replace_buf.data[4] |=
7693                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7694         filter_replace_buf.data[6] = 0xFF;
7695         filter_replace_buf.data[7] = 0xFF;
7696         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7697                                                &filter_replace_buf);
7698         if (status < 0)
7699                 return status;
7700         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7701                     "cloud l1 type is changed from 0x%x to 0x%x",
7702                     filter_replace.old_filter_type,
7703                     filter_replace.new_filter_type);
7704
7705         /* for GTP-U */
7706         memset(&filter_replace, 0,
7707                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7708         memset(&filter_replace_buf, 0,
7709                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7710         /* create L1 filter */
7711         filter_replace.old_filter_type =
7712                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7713         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7714         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7715                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7716         /* Prepare the buffer, 2 entries */
7717         filter_replace_buf.data[0] =
7718                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7719         filter_replace_buf.data[0] |=
7720                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7721         filter_replace_buf.data[2] = 0xFF;
7722         filter_replace_buf.data[3] = 0xFF;
7723         filter_replace_buf.data[4] =
7724                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7725         filter_replace_buf.data[4] |=
7726                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7727         filter_replace_buf.data[6] = 0xFF;
7728         filter_replace_buf.data[7] = 0xFF;
7729
7730         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7731                                                &filter_replace_buf);
7732         if (!status) {
7733                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7734                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7735                             "cloud l1 type is changed from 0x%x to 0x%x",
7736                             filter_replace.old_filter_type,
7737                             filter_replace.new_filter_type);
7738         }
7739         return status;
7740 }
7741
7742 static enum i40e_status_code
7743 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7744 {
7745         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7746         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7747         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7748         enum i40e_status_code status = I40E_SUCCESS;
7749
7750         if (pf->support_multi_driver) {
7751                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7752                 return I40E_NOT_SUPPORTED;
7753         }
7754
7755         /* for GTP-C */
7756         memset(&filter_replace, 0,
7757                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7758         memset(&filter_replace_buf, 0,
7759                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7760         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7761         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7762         filter_replace.new_filter_type =
7763                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7764         /* Prepare the buffer, 2 entries */
7765         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7766         filter_replace_buf.data[0] |=
7767                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7768         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7769         filter_replace_buf.data[4] |=
7770                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7771         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7772                                                &filter_replace_buf);
7773         if (status < 0)
7774                 return status;
7775         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7776                     "cloud filter type is changed from 0x%x to 0x%x",
7777                     filter_replace.old_filter_type,
7778                     filter_replace.new_filter_type);
7779
7780         /* for GTP-U */
7781         memset(&filter_replace, 0,
7782                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7783         memset(&filter_replace_buf, 0,
7784                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7785         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7786         filter_replace.old_filter_type =
7787                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7788         filter_replace.new_filter_type =
7789                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7790         /* Prepare the buffer, 2 entries */
7791         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7792         filter_replace_buf.data[0] |=
7793                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7794         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7795         filter_replace_buf.data[4] |=
7796                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7797
7798         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7799                                                &filter_replace_buf);
7800         if (!status) {
7801                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7802                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7803                             "cloud filter type is changed from 0x%x to 0x%x",
7804                             filter_replace.old_filter_type,
7805                             filter_replace.new_filter_type);
7806         }
7807         return status;
7808 }
7809
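/*
 * Same flow as i40e_dev_tunnel_filter_set(), but driven by the
 * i40e-specific conf structure and covering more tunnel types.
 * MPLSoUDP/MPLSoGRE and GTP-C/GTP-U spread the tenant id/TEID across
 * the general_fields words and therefore need the big-buffer variant
 * of the add/remove cloud filter AQ commands (hence the big_buffer
 * flag).
 */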
7810 int
7811 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7812                       struct i40e_tunnel_filter_conf *tunnel_filter,
7813                       uint8_t add)
7814 {
7815         uint16_t ip_type;
7816         uint32_t ipv4_addr, ipv4_addr_le;
7817         uint8_t i, tun_type = 0;
7818         /* internal variable to convert ipv6 byte order */
7819         uint32_t convert_ipv6[4];
7820         int val, ret = 0;
7821         struct i40e_pf_vf *vf = NULL;
7822         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7823         struct i40e_vsi *vsi;
7824         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7825         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7826         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7827         struct i40e_tunnel_filter *tunnel, *node;
7828         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7829         uint32_t teid_le;
7830         bool big_buffer = 0;
7831
7832         cld_filter = rte_zmalloc("tunnel_filter",
7833                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7834                          0);
7835
7836         if (cld_filter == NULL) {
7837                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7838                 return -ENOMEM;
7839         }
7840         pfilter = cld_filter;
7841
7842         ether_addr_copy(&tunnel_filter->outer_mac,
7843                         (struct ether_addr *)&pfilter->element.outer_mac);
7844         ether_addr_copy(&tunnel_filter->inner_mac,
7845                         (struct ether_addr *)&pfilter->element.inner_mac);
7846
7847         pfilter->element.inner_vlan =
7848                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7849         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7850                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7851                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7852                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7853                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7854                                 &ipv4_addr_le,
7855                                 sizeof(pfilter->element.ipaddr.v4.data));
7856         } else {
7857                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7858                 for (i = 0; i < 4; i++) {
7859                         convert_ipv6[i] =
7860                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7861                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7862                 }
7863                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7864                            &convert_ipv6,
7865                            sizeof(pfilter->element.ipaddr.v6.data));
7866         }
7867
7868         /* Check the tunnel type */
7869         switch (tunnel_filter->tunnel_type) {
7870         case I40E_TUNNEL_TYPE_VXLAN:
7871                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7872                 break;
7873         case I40E_TUNNEL_TYPE_NVGRE:
7874                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7875                 break;
7876         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7877                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7878                 break;
7879         case I40E_TUNNEL_TYPE_MPLSoUDP:
7880                 if (!pf->mpls_replace_flag) {
7881                         i40e_replace_mpls_l1_filter(pf);
7882                         i40e_replace_mpls_cloud_filter(pf);
7883                         pf->mpls_replace_flag = 1;
7884                 }
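                     /* The 20-bit MPLS label is spread across two
                      * field-vector words: WORD0 takes its upper 16 bits
                      * and WORD1 carries the low 4 bits in its top nibble.
                      */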
7885                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7886                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7887                         teid_le >> 4;
7888                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7889                         (teid_le & 0xF) << 12;
7890                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7891                         0x40;
7892                 big_buffer = 1;
7893                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7894                 break;
7895         case I40E_TUNNEL_TYPE_MPLSoGRE:
7896                 if (!pf->mpls_replace_flag) {
7897                         i40e_replace_mpls_l1_filter(pf);
7898                         i40e_replace_mpls_cloud_filter(pf);
7899                         pf->mpls_replace_flag = 1;
7900                 }
7901                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7902                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7903                         teid_le >> 4;
7904                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7905                         (teid_le & 0xF) << 12;
7906                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7907                         0x0;
7908                 big_buffer = 1;
7909                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7910                 break;
7911         case I40E_TUNNEL_TYPE_GTPC:
7912                 if (!pf->gtp_replace_flag) {
7913                         i40e_replace_gtp_l1_filter(pf);
7914                         i40e_replace_gtp_cloud_filter(pf);
7915                         pf->gtp_replace_flag = 1;
7916                 }
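                     /* The 32-bit GTP TEID is split across two 16-bit
                      * field-vector words: WORD0 takes the upper half and
                      * WORD1 the lower; the GTPU case below is identical
                      * except that it uses the 0x13 word set.
                      */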
7917                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7918                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7919                         (teid_le >> 16) & 0xFFFF;
7920                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7921                         teid_le & 0xFFFF;
7922                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7923                         0x0;
7924                 big_buffer = 1;
7925                 break;
7926         case I40E_TUNNEL_TYPE_GTPU:
7927                 if (!pf->gtp_replace_flag) {
7928                         i40e_replace_gtp_l1_filter(pf);
7929                         i40e_replace_gtp_cloud_filter(pf);
7930                         pf->gtp_replace_flag = 1;
7931                 }
7932                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7933                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7934                         (teid_le >> 16) & 0xFFFF;
7935                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7936                         teid_le & 0xFFFF;
7937                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7938                         0x0;
7939                 big_buffer = 1;
7940                 break;
7941         case I40E_TUNNEL_TYPE_QINQ:
7942                 if (!pf->qinq_replace_flag) {
7943                         ret = i40e_cloud_filter_qinq_create(pf);
7944                         if (ret < 0)
7945                                 PMD_DRV_LOG(DEBUG,
7946                                             "QinQ tunnel filter already created.");
7947                         pf->qinq_replace_flag = 1;
7948                 }
7949                 /* Add the values of the outer and inner VLAN
7950                  * to the general fields.
7951                  * The big buffer must be used; see the changes in
7952                  * i40e_aq_add_cloud_filters.
7953                  */
7954                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7955                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7956                 big_buffer = 1;
7957                 break;
7958         default:
7959                 /* Other tunnel types are not supported. */
7960                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7961                 rte_free(cld_filter);
7962                 return -EINVAL;
7963         }
7964
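             /* Select the customized cloud filter type installed by the
              * replace commands above: 0x11 for MPLSoUDP/GTPC, 0x12 for
              * MPLSoGRE/GTPU, and 0x10 for QinQ.
              */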
7965         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7966                 pfilter->element.flags =
7967                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7968         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7969                 pfilter->element.flags =
7970                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7971         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7972                 pfilter->element.flags =
7973                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7974         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7975                 pfilter->element.flags =
7976                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7977         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7978                 pfilter->element.flags |=
7979                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7980         else {
7981                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7982                                                 &pfilter->element.flags);
7983                 if (val < 0) {
7984                         rte_free(cld_filter);
7985                         return -EINVAL;
7986                 }
7987         }
7988
7989         pfilter->element.flags |= rte_cpu_to_le_16(
7990                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7991                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7992         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7993         pfilter->element.queue_number =
7994                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7995
7996         if (!tunnel_filter->is_to_vf)
7997                 vsi = pf->main_vsi;
7998         else {
7999                 if (tunnel_filter->vf_id >= pf->vf_num) {
8000                         PMD_DRV_LOG(ERR, "Invalid argument.");
8001                         rte_free(cld_filter);
8002                         return -EINVAL;
8003                 }
8004                 vf = &pf->vfs[tunnel_filter->vf_id];
8005                 vsi = vf->vsi;
8006         }
8007
8008         /* Check if the filter exists in the SW list */
8009         memset(&check_filter, 0, sizeof(check_filter));
8010         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8011         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8012         check_filter.vf_id = tunnel_filter->vf_id;
8013         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8014         if (add && node) {
8015                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8016                 rte_free(cld_filter);
8017                 return -EINVAL;
8018         }
8019
8020         if (!add && !node) {
8021                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8022                 rte_free(cld_filter);
8023                 return -EINVAL;
8024         }
8025
8026         if (add) {
8027                 if (big_buffer)
8028                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
8029                                                    vsi->seid, cld_filter, 1);
8030                 else
8031                         ret = i40e_aq_add_cloud_filters(hw,
8032                                         vsi->seid, &cld_filter->element, 1);
8033                 if (ret < 0) {
8034                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8035                         rte_free(cld_filter);
8036                         return -ENOTSUP;
8037                 }
8038                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8039                 if (tunnel == NULL) {
8040                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8041                         rte_free(cld_filter);
8042                         return -ENOMEM;
8043                 }
8044
8045                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8046                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8047                 if (ret < 0)
8048                         rte_free(tunnel);
8049         } else {
8050                 if (big_buffer)
8051                         ret = i40e_aq_remove_cloud_filters_big_buffer(
8052                                 hw, vsi->seid, cld_filter, 1);
8053                 else
8054                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
8055                                                    &cld_filter->element, 1);
8056                 if (ret < 0) {
8057                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8058                         rte_free(cld_filter);
8059                         return -ENOTSUP;
8060                 }
8061                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8062         }
8063
8064         rte_free(cld_filter);
8065         return ret;
8066 }
8067
8068 static int
8069 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8070 {
8071         uint8_t i;
8072
8073         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8074                 if (pf->vxlan_ports[i] == port)
8075                         return i;
8076         }
8077
8078         return -1;
8079 }
8080
8081 static int
8082 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
8083 {
8084         int  idx, ret;
8085         uint8_t filter_idx;
8086         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8087
8088         idx = i40e_get_vxlan_port_idx(pf, port);
8089
8090         /* Check if port already exists */
8091         if (idx >= 0) {
8092                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8093                 return -EINVAL;
8094         }
8095
8096         /* Now check for space to add the new port; a zero entry marks a free slot */
8097         idx = i40e_get_vxlan_port_idx(pf, 0);
8098         if (idx < 0) {
8099                 PMD_DRV_LOG(ERR,
8100                         "Maximum number of UDP ports reached, not adding port %d",
8101                         port);
8102                 return -ENOSPC;
8103         }
8104
8105         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
8106                                         &filter_idx, NULL);
8107         if (ret < 0) {
8108                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8109                 return -1;
8110         }
8111
8112         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
8113                          port, filter_idx);
8114
8115         /* New port: add it and mark its index in the bitmap */
8116         pf->vxlan_ports[idx] = port;
8117         pf->vxlan_bitmap |= (1 << idx);
8118
8119         if (!(pf->flags & I40E_FLAG_VXLAN))
8120                 pf->flags |= I40E_FLAG_VXLAN;
8121
8122         return 0;
8123 }
8124
8125 static int
8126 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8127 {
8128         int idx;
8129         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8130
8131         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8132                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8133                 return -EINVAL;
8134         }
8135
8136         idx = i40e_get_vxlan_port_idx(pf, port);
8137
8138         if (idx < 0) {
8139                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8140                 return -EINVAL;
8141         }
8142
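             /* The admin queue delete takes the filter index rather than
              * the port number itself.
              */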
8143         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8144                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8145                 return -1;
8146         }
8147
8148         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
8149                         port, idx);
8150
8151         pf->vxlan_ports[idx] = 0;
8152         pf->vxlan_bitmap &= ~(1 << idx);
8153
8154         if (!pf->vxlan_bitmap)
8155                 pf->flags &= ~I40E_FLAG_VXLAN;
8156
8157         return 0;
8158 }
8159
8160 /* Add UDP tunneling port */
8161 static int
8162 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8163                              struct rte_eth_udp_tunnel *udp_tunnel)
8164 {
8165         int ret = 0;
8166         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8167
8168         if (udp_tunnel == NULL)
8169                 return -EINVAL;
8170
8171         switch (udp_tunnel->prot_type) {
8172         case RTE_TUNNEL_TYPE_VXLAN:
8173                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8174                 break;
8175
8176         case RTE_TUNNEL_TYPE_GENEVE:
8177         case RTE_TUNNEL_TYPE_TEREDO:
8178                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8179                 ret = -1;
8180                 break;
8181
8182         default:
8183                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8184                 ret = -1;
8185                 break;
8186         }
8187
8188         return ret;
8189 }
8190
8191 /* Remove UDP tunneling port */
8192 static int
8193 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8194                              struct rte_eth_udp_tunnel *udp_tunnel)
8195 {
8196         int ret = 0;
8197         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8198
8199         if (udp_tunnel == NULL)
8200                 return -EINVAL;
8201
8202         switch (udp_tunnel->prot_type) {
8203         case RTE_TUNNEL_TYPE_VXLAN:
8204                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8205                 break;
8206         case RTE_TUNNEL_TYPE_GENEVE:
8207         case RTE_TUNNEL_TYPE_TEREDO:
8208                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8209                 ret = -1;
8210                 break;
8211         default:
8212                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8213                 ret = -1;
8214                 break;
8215         }
8216
8217         return ret;
8218 }
8219
8220 /* Calculate the maximum number of contiguous PF queues that are configured */
8221 static int
8222 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8223 {
8224         struct rte_eth_dev_data *data = pf->dev_data;
8225         int i, num;
8226         struct i40e_rx_queue *rxq;
8227
8228         num = 0;
8229         for (i = 0; i < pf->lan_nb_qps; i++) {
8230                 rxq = data->rx_queues[i];
8231                 if (rxq && rxq->q_set)
8232                         num++;
8233                 else
8234                         break;
8235         }
8236
8237         return num;
8238 }
8239
8240 /* Configure RSS */
8241 static int
8242 i40e_pf_config_rss(struct i40e_pf *pf)
8243 {
8244         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8245         struct rte_eth_rss_conf rss_conf;
8246         uint32_t i, lut = 0;
8247         uint16_t j, num;
8248
8249         /*
8250          * If both VMDQ and RSS are enabled, not all PF queues are configured.
8251          * It's necessary to calculate the number of PF queues actually configured.
8252          */
8253         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8254                 num = i40e_pf_calc_configured_queues_num(pf);
8255         else
8256                 num = pf->dev_data->nb_rx_queues;
8257
8258         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8259         PMD_INIT_LOG(INFO, "%u contiguous PF queues are configured",
8260                         num);
8261
8262         if (num == 0) {
8263                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8264                 return -ENOTSUP;
8265         }
8266
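             /* Fill the RSS lookup table: each 32-bit HLUT register packs
              * four one-byte entries, so a register is written on every
              * fourth iteration while j cycles through queues 0..num-1.
              */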
8267         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8268                 if (j == num)
8269                         j = 0;
8270                 lut = (lut << 8) | (j & ((0x1 <<
8271                         hw->func_caps.rss_table_entry_width) - 1));
8272                 if ((i & 3) == 3)
8273                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8274         }
8275
8276         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8277         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8278                 i40e_pf_disable_rss(pf);
8279                 return 0;
8280         }
8281         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8282                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8283                 /* Random default keys */
8284                 static uint32_t rss_key_default[] = {0x6b793944,
8285                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8286                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8287                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8288
8289                 rss_conf.rss_key = (uint8_t *)rss_key_default;
8290                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8291                                                         sizeof(uint32_t);
8292         }
8293
8294         return i40e_hw_rss_hash_set(pf, &rss_conf);
8295 }
8296
8297 static int
8298 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8299                                struct rte_eth_tunnel_filter_conf *filter)
8300 {
8301         if (pf == NULL || filter == NULL) {
8302                 PMD_DRV_LOG(ERR, "Invalid parameter");
8303                 return -EINVAL;
8304         }
8305
8306         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8307                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8308                 return -EINVAL;
8309         }
8310
8311         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8312                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8313                 return -EINVAL;
8314         }
8315
8316         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8317                 (is_zero_ether_addr(&filter->outer_mac))) {
8318                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8319                 return -EINVAL;
8320         }
8321
8322         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8323                 (is_zero_ether_addr(&filter->inner_mac))) {
8324                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8325                 return -EINVAL;
8326         }
8327
8328         return 0;
8329 }
8330
8331 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8332 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8333 static int
8334 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8335 {
8336         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8337         uint32_t val, reg;
8338         int ret = -EINVAL;
8339
8340         if (pf->support_multi_driver) {
8341                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8342                 return -ENOTSUP;
8343         }
8344
8345         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8346         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8347
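             /* Setting the mask-enable bit selects a 3-byte GRE key;
              * clearing it keeps the default 4-byte key.
              */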
8348         if (len == 3) {
8349                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8350         } else if (len == 4) {
8351                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8352         } else {
8353                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8354                 return ret;
8355         }
8356
8357         if (reg != val) {
8358                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8359                                                    reg, NULL);
8360                 if (ret != 0)
8361                         return ret;
8362                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8363                             "with value 0x%08x",
8364                             I40E_GL_PRS_FVBM(2), reg);
8365                 i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
8366         } else {
8367                 ret = 0;
8368         }
8369         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8370                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8371
8372         return ret;
8373 }
8374
8375 static int
8376 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8377 {
8378         int ret = -EINVAL;
8379
8380         if (!hw || !cfg)
8381                 return -EINVAL;
8382
8383         switch (cfg->cfg_type) {
8384         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8385                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8386                 break;
8387         default:
8388                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8389                 break;
8390         }
8391
8392         return ret;
8393 }
8394
8395 static int
8396 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8397                                enum rte_filter_op filter_op,
8398                                void *arg)
8399 {
8400         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8401         int ret = I40E_ERR_PARAM;
8402
8403         switch (filter_op) {
8404         case RTE_ETH_FILTER_SET:
8405                 ret = i40e_dev_global_config_set(hw,
8406                         (struct rte_eth_global_cfg *)arg);
8407                 break;
8408         default:
8409                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8410                 break;
8411         }
8412
8413         return ret;
8414 }
8415
8416 static int
8417 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8418                           enum rte_filter_op filter_op,
8419                           void *arg)
8420 {
8421         struct rte_eth_tunnel_filter_conf *filter;
8422         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8423         int ret = I40E_SUCCESS;
8424
8425         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8426
8427         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8428                 return I40E_ERR_PARAM;
8429
8430         switch (filter_op) {
8431         case RTE_ETH_FILTER_NOP:
8432                 if (!(pf->flags & I40E_FLAG_VXLAN))
8433                         ret = I40E_NOT_SUPPORTED;
8434                 break;
8435         case RTE_ETH_FILTER_ADD:
8436                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8437                 break;
8438         case RTE_ETH_FILTER_DELETE:
8439                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8440                 break;
8441         default:
8442                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8443                 ret = I40E_ERR_PARAM;
8444                 break;
8445         }
8446
8447         return ret;
8448 }
8449
8450 static int
8451 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8452 {
8453         int ret = 0;
8454         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8455
8456         /* RSS setup */
8457         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8458                 ret = i40e_pf_config_rss(pf);
8459         else
8460                 i40e_pf_disable_rss(pf);
8461
8462         return ret;
8463 }
8464
8465 /* Get the symmetric hash enable configurations per port */
8466 static void
8467 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8468 {
8469         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8470
8471         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8472 }
8473
8474 /* Set the symmetric hash enable configurations per port */
8475 static void
8476 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8477 {
8478         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8479
8480         if (enable > 0) {
8481                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8482                         PMD_DRV_LOG(INFO,
8483                                 "Symmetric hash has already been enabled");
8484                         return;
8485                 }
8486                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8487         } else {
8488                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8489                         PMD_DRV_LOG(INFO,
8490                                 "Symmetric hash has already been disabled");
8491                         return;
8492                 }
8493                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8494         }
8495         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8496         I40E_WRITE_FLUSH(hw);
8497 }
8498
8499 /*
8500  * Get global configurations of hash function type and symmetric hash enable
8501  * per flow type (pctype). Note that global configuration means it affects all
8502  * the ports on the same NIC.
8503  */
8504 static int
8505 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8506                                    struct rte_eth_hash_global_conf *g_cfg)
8507 {
8508         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8509         uint32_t reg;
8510         uint16_t i, j;
8511
8512         memset(g_cfg, 0, sizeof(*g_cfg));
8513         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8514         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8515                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8516         else
8517                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8518         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8519                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8520
8521         /*
8522          * As i40e supports fewer than 64 flow types, only the first 64 bits
8523          * need to be checked.
8524          */
8525         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8526                 g_cfg->valid_bit_mask[i] = 0ULL;
8527                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8528         }
8529
8530         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8531
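             /* A flow type is reported as symmetric-hash enabled when any
              * hardware pctype mapped to it has SYMH_ENA set.
              */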
8532         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8533                 if (!adapter->pctypes_tbl[i])
8534                         continue;
8535                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8536                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8537                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8538                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8539                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8540                                         g_cfg->sym_hash_enable_mask[0] |=
8541                                                                 (1ULL << i);
8542                                 }
8543                         }
8544                 }
8545         }
8546
8547         return 0;
8548 }
8549
8550 static int
8551 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8552                               const struct rte_eth_hash_global_conf *g_cfg)
8553 {
8554         uint32_t i;
8555         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8556
8557         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8558                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8559                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8560                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8561                                                 g_cfg->hash_func);
8562                 return -EINVAL;
8563         }
8564
8565         /*
8566          * As i40e supports fewer than 64 flow types, only the first 64 bits
8567          * need to be checked.
8568          */
8569         mask0 = g_cfg->valid_bit_mask[0];
8570         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8571                 if (i == 0) {
8572                         /* Check if any unsupported flow type is configured */
8573                         if ((mask0 | i40e_mask) ^ i40e_mask)
8574                                 goto mask_err;
8575                 } else {
8576                         if (g_cfg->valid_bit_mask[i])
8577                                 goto mask_err;
8578                 }
8579         }
8580
8581         return 0;
8582
8583 mask_err:
8584         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8585
8586         return -EINVAL;
8587 }
8588
8589 /*
8590  * Set global configurations of hash function type and symmetric hash enable
8591  * per flow type (pctype). Note that modifying the global configuration will
8592  * affect all the ports on the same NIC.
8593  */
8594 static int
8595 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8596                                    struct rte_eth_hash_global_conf *g_cfg)
8597 {
8598         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8599         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8600         int ret;
8601         uint16_t i, j;
8602         uint32_t reg;
8603         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8604
8605         if (pf->support_multi_driver) {
8606                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8607                 return -ENOTSUP;
8608         }
8609
8610         /* Check the input parameters */
8611         ret = i40e_hash_global_config_check(adapter, g_cfg);
8612         if (ret < 0)
8613                 return ret;
8614
8615         /*
8616          * As i40e supports fewer than 64 flow types, only the first 64 bits
8617          * need to be configured.
8618          */
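             /* For each requested flow type, program the symmetric hash
              * enable bit into every hardware pctype register mapped to it.
              */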
8619         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8620                 if (mask0 & (1UL << i)) {
8621                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8622                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8623
8624                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8625                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8626                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8627                                         i40e_write_global_rx_ctl(hw,
8628                                                           I40E_GLQF_HSYM(j),
8629                                                           reg);
8630                         }
8631                         i40e_global_cfg_warning(I40E_WARNING_HSYM);
8632                 }
8633         }
8634
8635         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8636         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8637                 /* Toeplitz */
8638                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8639                         PMD_DRV_LOG(DEBUG,
8640                                 "Hash function already set to Toeplitz");
8641                         goto out;
8642                 }
8643                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8644         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8645                 /* Simple XOR */
8646                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8647                         PMD_DRV_LOG(DEBUG,
8648                                 "Hash function already set to Simple XOR");
8649                         goto out;
8650                 }
8651                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8652         } else
8653                 /* Use the default, and keep it as it is */
8654                 goto out;
8655
8656         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8657         i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
8658
8659 out:
8660         I40E_WRITE_FLUSH(hw);
8661
8662         return 0;
8663 }
8664
8665 /**
8666  * Valid input sets for hash and flow director filters per PCTYPE
8667  */
8668 static uint64_t
8669 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8670                 enum rte_filter_type filter)
8671 {
8672         uint64_t valid;
8673
8674         static const uint64_t valid_hash_inset_table[] = {
8675                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8676                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8677                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8678                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8679                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8680                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8681                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8682                         I40E_INSET_FLEX_PAYLOAD,
8683                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8684                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8685                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8686                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8687                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8688                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8689                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8690                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8691                         I40E_INSET_FLEX_PAYLOAD,
8692                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8693                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8694                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8695                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8696                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8697                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8698                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8699                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8700                         I40E_INSET_FLEX_PAYLOAD,
8701                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8702                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8703                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8704                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8705                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8706                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8707                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8708                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8709                         I40E_INSET_FLEX_PAYLOAD,
8710                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8711                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8712                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8713                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8714                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8715                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8716                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8717                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8718                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8719                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8720                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8721                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8722                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8723                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8724                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8725                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8726                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8727                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8728                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8729                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8730                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8731                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8732                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8733                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8734                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8735                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8736                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8737                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8738                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8739                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8740                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8741                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8742                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8743                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8744                         I40E_INSET_FLEX_PAYLOAD,
8745                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8746                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8747                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8748                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8749                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8750                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8751                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8752                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8753                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8754                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8755                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8756                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8757                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8758                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8759                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8760                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8761                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8762                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8763                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8764                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8765                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8766                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8767                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8768                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8769                         I40E_INSET_FLEX_PAYLOAD,
8770                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8771                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8772                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8773                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8774                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8775                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8776                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8777                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8778                         I40E_INSET_FLEX_PAYLOAD,
8779                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8780                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8781                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8782                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8783                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8784                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8785                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8786                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8787                         I40E_INSET_FLEX_PAYLOAD,
8788                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8789                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8790                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8791                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8792                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8793                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8794                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8795                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8796                         I40E_INSET_FLEX_PAYLOAD,
8797                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8798                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8799                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8800                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8801                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8802                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8803                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8804                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8805                         I40E_INSET_FLEX_PAYLOAD,
8806                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8807                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8808                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8809                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8810                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8811                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8812                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8813                         I40E_INSET_FLEX_PAYLOAD,
8814                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8815                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8816                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8817                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8818                         I40E_INSET_FLEX_PAYLOAD,
8819         };
8820
8821         /**
8822          * Flow director supports only fields defined in
8823          * union rte_eth_fdir_flow.
8824          */
8825         static const uint64_t valid_fdir_inset_table[] = {
8826                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8827                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8828                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8829                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8830                 I40E_INSET_IPV4_TTL,
8831                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8832                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8833                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8834                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8835                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8836                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8837                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8838                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8839                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8840                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8841                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8842                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8843                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8844                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8845                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8846                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8847                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8848                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8849                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8850                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8851                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8852                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8853                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8854                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8855                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8856                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8857                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8858                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8859                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8860                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8861                 I40E_INSET_SCTP_VT,
8862                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8863                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8864                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8865                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8866                 I40E_INSET_IPV4_TTL,
8867                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8868                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8869                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8870                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8871                 I40E_INSET_IPV6_HOP_LIMIT,
8872                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8873                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8874                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8875                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8876                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8877                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8878                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8879                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8880                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8881                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8882                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8883                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8884                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8885                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8886                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8887                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8888                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8889                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8890                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8891                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8892                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8893                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8894                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8895                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8896                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8897                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8898                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8899                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8900                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8901                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8902                 I40E_INSET_SCTP_VT,
8903                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8904                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8905                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8906                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8907                 I40E_INSET_IPV6_HOP_LIMIT,
8908                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8909                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8910                 I40E_INSET_LAST_ETHER_TYPE,
8911         };
8912
8913         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8914                 return 0;
8915         if (filter == RTE_ETH_FILTER_HASH)
8916                 valid = valid_hash_inset_table[pctype];
8917         else
8918                 valid = valid_fdir_inset_table[pctype];
8919
8920         return valid;
8921 }
8922
8923 /**
8924  * Validate if the input set is allowed for a specific PCTYPE
8925  */
8926 int
8927 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8928                 enum rte_filter_type filter, uint64_t inset)
8929 {
8930         uint64_t valid;
8931
8932         valid = i40e_get_valid_input_set(pctype, filter);
8933         if (inset & (~valid))
8934                 return -EINVAL;
8935
8936         return 0;
8937 }
8938
8939 /* Default input set field combination per pctype */
8940 uint64_t
8941 i40e_get_default_input_set(uint16_t pctype)
8942 {
8943         static const uint64_t default_inset_table[] = {
8944                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8945                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8946                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8947                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8948                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8949                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8950                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8951                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8952                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8953                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8954                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8955                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8956                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8957                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8958                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8959                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8960                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8961                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8962                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8963                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8964                         I40E_INSET_SCTP_VT,
8965                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8966                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8967                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8968                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8969                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8970                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8971                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8972                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8973                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8974                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8975                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8976                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8977                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8978                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8979                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8980                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8981                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8982                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8983                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8984                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8985                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8986                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8987                         I40E_INSET_SCTP_VT,
8988                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8989                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8990                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8991                         I40E_INSET_LAST_ETHER_TYPE,
8992         };
8993
8994         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8995                 return 0;
8996
8997         return default_inset_table[pctype];
8998 }
8999
9000 /**
9001  * Parse the input set from index to logical bit masks
9002  */
9003 static int
9004 i40e_parse_input_set(uint64_t *inset,
9005                      enum i40e_filter_pctype pctype,
9006                      enum rte_eth_input_set_field *field,
9007                      uint16_t size)
9008 {
9009         uint16_t i, j;
9010         int ret = -EINVAL;
9011
9012         static const struct {
9013                 enum rte_eth_input_set_field field;
9014                 uint64_t inset;
9015         } inset_convert_table[] = {
9016                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9017                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9018                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9019                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9020                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9021                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9022                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9023                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9024                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9025                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9026                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9027                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9028                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9029                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9030                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9031                         I40E_INSET_IPV6_NEXT_HDR},
9032                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9033                         I40E_INSET_IPV6_HOP_LIMIT},
9034                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9035                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9036                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9037                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9038                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9039                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9040                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9041                         I40E_INSET_SCTP_VT},
9042                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9043                         I40E_INSET_TUNNEL_DMAC},
9044                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9045                         I40E_INSET_VLAN_TUNNEL},
9046                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9047                         I40E_INSET_TUNNEL_ID},
9048                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9049                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9050                         I40E_INSET_FLEX_PAYLOAD_W1},
9051                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9052                         I40E_INSET_FLEX_PAYLOAD_W2},
9053                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9054                         I40E_INSET_FLEX_PAYLOAD_W3},
9055                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9056                         I40E_INSET_FLEX_PAYLOAD_W4},
9057                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9058                         I40E_INSET_FLEX_PAYLOAD_W5},
9059                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9060                         I40E_INSET_FLEX_PAYLOAD_W6},
9061                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9062                         I40E_INSET_FLEX_PAYLOAD_W7},
9063                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9064                         I40E_INSET_FLEX_PAYLOAD_W8},
9065         };
9066
9067         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9068                 return ret;
9069
        /* Only one item is allowed for the default or none cases */
9071         if (size == 1) {
9072                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9073                         *inset = i40e_get_default_input_set(pctype);
9074                         return 0;
9075                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9076                         *inset = I40E_INSET_NONE;
9077                         return 0;
9078                 }
9079         }
9080
9081         for (i = 0, *inset = 0; i < size; i++) {
9082                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9083                         if (field[i] == inset_convert_table[j].field) {
9084                                 *inset |= inset_convert_table[j].inset;
9085                                 break;
9086                         }
9087                 }
9088
                /* It contains an unsupported input set field, return immediately */
9090                 if (j == RTE_DIM(inset_convert_table))
9091                         return ret;
9092         }
9093
9094         return 0;
9095 }
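
/*
 * Illustrative sketch (not part of the driver): how a field list maps to
 * the logical inset bits above, assuming an IPv4/UDP flow keyed on the
 * source/destination addresses and the UDP destination port.
 *
 *	enum rte_eth_input_set_field field[] = {
 *		RTE_ETH_INPUT_SET_L3_SRC_IP4,
 *		RTE_ETH_INPUT_SET_L3_DST_IP4,
 *		RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
 *	};
 *	uint64_t inset;
 *	int ret = i40e_parse_input_set(&inset,
 *				       I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *				       field, RTE_DIM(field));
 *
 * On success ret is 0 and inset equals
 * I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_DST_PORT.
 */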
9096
9097 /**
 * Translate the input set from logical bit masks to register-aware
 * bit masks
9100  */
9101 uint64_t
9102 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9103 {
9104         uint64_t val = 0;
9105         uint16_t i;
9106
9107         struct inset_map {
9108                 uint64_t inset;
9109                 uint64_t inset_reg;
9110         };
9111
9112         static const struct inset_map inset_map_common[] = {
9113                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9114                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9115                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9116                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9117                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9118                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9119                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9120                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9121                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9122                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9123                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9124                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9125                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9126                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9127                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9128                 {I40E_INSET_TUNNEL_DMAC,
9129                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9130                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9131                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9132                 {I40E_INSET_TUNNEL_SRC_PORT,
9133                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9134                 {I40E_INSET_TUNNEL_DST_PORT,
9135                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9136                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9137                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9138                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9139                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9140                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9141                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9142                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9143                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9144                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9145         };
9146
        /* Some inset registers differ on X722 */
9148         static const struct inset_map inset_map_diff_x722[] = {
9149                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9150                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9151                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9152                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9153         };
9154
9155         static const struct inset_map inset_map_diff_not_x722[] = {
9156                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9157                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9158                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9159                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9160         };
9161
9162         if (input == 0)
9163                 return val;
9164
9165         /* Translate input set to register aware inset */
9166         if (type == I40E_MAC_X722) {
9167                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9168                         if (input & inset_map_diff_x722[i].inset)
9169                                 val |= inset_map_diff_x722[i].inset_reg;
9170                 }
9171         } else {
9172                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9173                         if (input & inset_map_diff_not_x722[i].inset)
9174                                 val |= inset_map_diff_not_x722[i].inset_reg;
9175                 }
9176         }
9177
9178         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9179                 if (input & inset_map_common[i].inset)
9180                         val |= inset_map_common[i].inset_reg;
9181         }
9182
9183         return val;
9184 }
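
/*
 * Illustrative sketch (not part of the driver): translating a logical
 * inset into a register-aware value. The mac type only changes the IPv4
 * src/dst/proto/ttl encodings, which differ on X722.
 *
 *	uint64_t reg = i40e_translate_input_set_reg(I40E_MAC_XL710,
 *						    I40E_INSET_IPV4_SRC |
 *						    I40E_INSET_IPV4_DST);
 *
 * Here reg equals I40E_REG_INSET_L3_SRC_IP4 | I40E_REG_INSET_L3_DST_IP4.
 */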
9185
9186 int
9187 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9188 {
9189         uint8_t i, idx = 0;
9190         uint64_t inset_need_mask = inset;
9191
9192         static const struct {
9193                 uint64_t inset;
9194                 uint32_t mask;
9195         } inset_mask_map[] = {
9196                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9197                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9198                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9199                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9200                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9201                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9202                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9203                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9204         };
9205
9206         if (!inset || !mask || !nb_elem)
9207                 return 0;
9208
9209         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
                /* Clear the inset bits that need no field mask,
                 * e.g. proto and ttl requested together
                 */
9213                 if ((inset & inset_mask_map[i].inset) ==
9214                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9215                         inset_need_mask &= ~inset_mask_map[i].inset;
9216                 if (!inset_need_mask)
9217                         return 0;
9218         }
9219         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9220                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9221                     inset_mask_map[i].inset) {
9222                         if (idx >= nb_elem) {
                                PMD_DRV_LOG(ERR, "exceeds the maximal number of bitmasks");
9224                                 return -EINVAL;
9225                         }
9226                         mask[idx] = inset_mask_map[i].mask;
9227                         idx++;
9228                 }
9229         }
9230
9231         return idx;
9232 }
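
/*
 * Worked example (illustrative only): for
 * inset = I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL
 * the first loop clears PROTO and TTL from inset_need_mask, since the
 * {PROTO | TTL, 0} table entry marks that combination as needing no
 * field mask. The second loop then emits a single entry,
 * mask[0] = I40E_INSET_IPV4_TOS_MASK, and the function returns 1. If the
 * required masks do not fit into nb_elem entries, -EINVAL is returned.
 */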
9233
9234 void
9235 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9236 {
9237         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9238
9239         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9240         if (reg != val)
9241                 i40e_write_rx_ctl(hw, addr, val);
9242         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9243                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9244 }
9245
9246 void
9247 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9248 {
9249         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9250
9251         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9252         if (reg != val)
9253                 i40e_write_global_rx_ctl(hw, addr, val);
9254         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9255                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9256 }
9257
9258 static void
9259 i40e_filter_input_set_init(struct i40e_pf *pf)
9260 {
9261         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9262         enum i40e_filter_pctype pctype;
9263         uint64_t input_set, inset_reg;
9264         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9265         int num, i;
9266         uint16_t flow_type;
9267
9268         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9269              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9270                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9271
9272                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9273                         continue;
9274
9275                 input_set = i40e_get_default_input_set(pctype);
9276
9277                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9278                                                    I40E_INSET_MASK_NUM_REG);
9279                 if (num < 0)
9280                         return;
9281                 if (pf->support_multi_driver && num > 0) {
9282                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9283                         return;
9284                 }
9285                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9286                                         input_set);
9287
9288                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9289                                       (uint32_t)(inset_reg & UINT32_MAX));
9290                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9291                                      (uint32_t)((inset_reg >>
9292                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9293                 if (!pf->support_multi_driver) {
9294                         i40e_check_write_global_reg(hw,
9295                                             I40E_GLQF_HASH_INSET(0, pctype),
9296                                             (uint32_t)(inset_reg & UINT32_MAX));
9297                         i40e_check_write_global_reg(hw,
9298                                              I40E_GLQF_HASH_INSET(1, pctype),
9299                                              (uint32_t)((inset_reg >>
9300                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9301
9302                         for (i = 0; i < num; i++) {
9303                                 i40e_check_write_global_reg(hw,
9304                                                     I40E_GLQF_FD_MSK(i, pctype),
9305                                                     mask_reg[i]);
9306                                 i40e_check_write_global_reg(hw,
9307                                                   I40E_GLQF_HASH_MSK(i, pctype),
9308                                                   mask_reg[i]);
9309                         }
                        /* Clear unused mask registers of the pctype */
9311                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9312                                 i40e_check_write_global_reg(hw,
9313                                                     I40E_GLQF_FD_MSK(i, pctype),
9314                                                     0);
9315                                 i40e_check_write_global_reg(hw,
9316                                                   I40E_GLQF_HASH_MSK(i, pctype),
9317                                                   0);
9318                         }
9319                 } else {
9320                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9321                 }
9322                 I40E_WRITE_FLUSH(hw);
9323
9324                 /* store the default input set */
9325                 if (!pf->support_multi_driver)
9326                         pf->hash_input_set[pctype] = input_set;
9327                 pf->fdir.input_set[pctype] = input_set;
9328         }
9329
9330         if (!pf->support_multi_driver) {
9331                 i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9332                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9333                 i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9334         }
9335 }
9336
9337 int
9338 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9339                          struct rte_eth_input_set_conf *conf)
9340 {
9341         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9342         enum i40e_filter_pctype pctype;
9343         uint64_t input_set, inset_reg = 0;
9344         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9345         int ret, i, num;
9346
9347         if (!conf) {
9348                 PMD_DRV_LOG(ERR, "Invalid pointer");
9349                 return -EFAULT;
9350         }
9351         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9352             conf->op != RTE_ETH_INPUT_SET_ADD) {
9353                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9354                 return -EINVAL;
9355         }
9356
9357         if (pf->support_multi_driver) {
9358                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9359                 return -ENOTSUP;
9360         }
9361
9362         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9363         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9364                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9365                 return -EINVAL;
9366         }
9367
9368         if (hw->mac.type == I40E_MAC_X722) {
9369                 /* get translated pctype value in fd pctype register */
9370                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9371                         I40E_GLQF_FD_PCTYPES((int)pctype));
9372         }
9373
9374         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9375                                    conf->inset_size);
9376         if (ret) {
9377                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9378                 return -EINVAL;
9379         }
9380
9381         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9382                 /* get inset value in register */
9383                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9384                 inset_reg <<= I40E_32_BIT_WIDTH;
9385                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9386                 input_set |= pf->hash_input_set[pctype];
9387         }
9388         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9389                                            I40E_INSET_MASK_NUM_REG);
9390         if (num < 0)
9391                 return -EINVAL;
9392
9393         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9394
9395         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9396                                     (uint32_t)(inset_reg & UINT32_MAX));
9397         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9398                                     (uint32_t)((inset_reg >>
9399                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9400         i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9401
9402         for (i = 0; i < num; i++)
9403                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9404                                             mask_reg[i]);
        /* Clear unused mask registers of the pctype */
9406         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9407                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9408                                             0);
9409         i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9410         I40E_WRITE_FLUSH(hw);
9411
9412         pf->hash_input_set[pctype] = input_set;
9413         return 0;
9414 }
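
/*
 * Illustrative sketch (application side, not part of the driver):
 * selecting the RSS hash input set through the generic filter API,
 * assuming port 0 is an i40e port; error handling is omitted.
 *
 *	struct rte_eth_hash_filter_info info = {
 *		.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT,
 *	};
 *
 *	info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	info.info.input_set_conf.inset_size = 2;
 *	info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
 *	info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
 *	info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
 *
 *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
 *				RTE_ETH_FILTER_SET, &info);
 */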
9415
9416 int
9417 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9418                          struct rte_eth_input_set_conf *conf)
9419 {
9420         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9421         enum i40e_filter_pctype pctype;
9422         uint64_t input_set, inset_reg = 0;
9423         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9424         int ret, i, num;
9425
9426         if (!hw || !conf) {
9427                 PMD_DRV_LOG(ERR, "Invalid pointer");
9428                 return -EFAULT;
9429         }
9430         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9431             conf->op != RTE_ETH_INPUT_SET_ADD) {
9432                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9433                 return -EINVAL;
9434         }
9435
9436         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9437
9438         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9439                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9440                 return -EINVAL;
9441         }
9442
9443         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9444                                    conf->inset_size);
9445         if (ret) {
9446                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9447                 return -EINVAL;
9448         }
9449
9450         /* get inset value in register */
9451         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9452         inset_reg <<= I40E_32_BIT_WIDTH;
9453         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9454
        /* The inset reg bits for flex payload cannot be changed for fdir
         * here; they are set by writing I40E_PRTQF_FD_FLXINSET
         * in i40e_set_flex_mask_on_pctype.
         */
9459         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9460                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9461         else
9462                 input_set |= pf->fdir.input_set[pctype];
9463         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9464                                            I40E_INSET_MASK_NUM_REG);
9465         if (num < 0)
9466                 return -EINVAL;
9467         if (pf->support_multi_driver && num > 0) {
9468                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9469                 return -ENOTSUP;
9470         }
9471
9472         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9473
9474         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9475                               (uint32_t)(inset_reg & UINT32_MAX));
9476         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9477                              (uint32_t)((inset_reg >>
9478                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9479
9480         if (!pf->support_multi_driver) {
9481                 for (i = 0; i < num; i++)
9482                         i40e_check_write_global_reg(hw,
9483                                                     I40E_GLQF_FD_MSK(i, pctype),
9484                                                     mask_reg[i]);
                /* Clear unused mask registers of the pctype */
9486                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9487                         i40e_check_write_global_reg(hw,
9488                                                     I40E_GLQF_FD_MSK(i, pctype),
9489                                                     0);
9490                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9491         } else {
9492                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9493         }
9494         I40E_WRITE_FLUSH(hw);
9495
9496         pf->fdir.input_set[pctype] = input_set;
9497         return 0;
9498 }
9499
9500 static int
9501 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9502 {
9503         int ret = 0;
9504
9505         if (!hw || !info) {
9506                 PMD_DRV_LOG(ERR, "Invalid pointer");
9507                 return -EFAULT;
9508         }
9509
9510         switch (info->info_type) {
9511         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9512                 i40e_get_symmetric_hash_enable_per_port(hw,
9513                                         &(info->info.enable));
9514                 break;
9515         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9516                 ret = i40e_get_hash_filter_global_config(hw,
9517                                 &(info->info.global_conf));
9518                 break;
9519         default:
9520                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9521                                                         info->info_type);
9522                 ret = -EINVAL;
9523                 break;
9524         }
9525
9526         return ret;
9527 }
9528
9529 static int
9530 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9531 {
9532         int ret = 0;
9533
9534         if (!hw || !info) {
9535                 PMD_DRV_LOG(ERR, "Invalid pointer");
9536                 return -EFAULT;
9537         }
9538
9539         switch (info->info_type) {
9540         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9541                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9542                 break;
9543         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9544                 ret = i40e_set_hash_filter_global_config(hw,
9545                                 &(info->info.global_conf));
9546                 break;
9547         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9548                 ret = i40e_hash_filter_inset_select(hw,
9549                                                &(info->info.input_set_conf));
9550                 break;
9551
9552         default:
9553                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9554                                                         info->info_type);
9555                 ret = -EINVAL;
9556                 break;
9557         }
9558
9559         return ret;
9560 }
9561
9562 /* Operations for hash function */
9563 static int
9564 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9565                       enum rte_filter_op filter_op,
9566                       void *arg)
9567 {
9568         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9569         int ret = 0;
9570
9571         switch (filter_op) {
9572         case RTE_ETH_FILTER_NOP:
9573                 break;
9574         case RTE_ETH_FILTER_GET:
9575                 ret = i40e_hash_filter_get(hw,
9576                         (struct rte_eth_hash_filter_info *)arg);
9577                 break;
9578         case RTE_ETH_FILTER_SET:
9579                 ret = i40e_hash_filter_set(hw,
9580                         (struct rte_eth_hash_filter_info *)arg);
9581                 break;
9582         default:
9583                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9584                                                                 filter_op);
9585                 ret = -ENOTSUP;
9586                 break;
9587         }
9588
9589         return ret;
9590 }
9591
9592 /* Convert ethertype filter structure */
9593 static int
9594 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9595                               struct i40e_ethertype_filter *filter)
9596 {
9597         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9598         filter->input.ether_type = input->ether_type;
9599         filter->flags = input->flags;
9600         filter->queue = input->queue;
9601
9602         return 0;
9603 }
9604
/* Check if the ethertype filter exists in the SW list */
9606 struct i40e_ethertype_filter *
9607 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9608                                 const struct i40e_ethertype_filter_input *input)
9609 {
9610         int ret;
9611
9612         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9613         if (ret < 0)
9614                 return NULL;
9615
9616         return ethertype_rule->hash_map[ret];
9617 }
9618
9619 /* Add ethertype filter in SW list */
9620 static int
9621 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9622                                 struct i40e_ethertype_filter *filter)
9623 {
9624         struct i40e_ethertype_rule *rule = &pf->ethertype;
9625         int ret;
9626
9627         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9628         if (ret < 0) {
9629                 PMD_DRV_LOG(ERR,
9630                             "Failed to insert ethertype filter"
                            " into hash table: %d!",
9632                             ret);
9633                 return ret;
9634         }
9635         rule->hash_map[ret] = filter;
9636
9637         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9638
9639         return 0;
9640 }
9641
9642 /* Delete ethertype filter in SW list */
9643 int
9644 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9645                              struct i40e_ethertype_filter_input *input)
9646 {
9647         struct i40e_ethertype_rule *rule = &pf->ethertype;
9648         struct i40e_ethertype_filter *filter;
9649         int ret;
9650
9651         ret = rte_hash_del_key(rule->hash_table, input);
9652         if (ret < 0) {
9653                 PMD_DRV_LOG(ERR,
9654                             "Failed to delete ethertype filter"
                            " from hash table: %d!",
9656                             ret);
9657                 return ret;
9658         }
9659         filter = rule->hash_map[ret];
9660         rule->hash_map[ret] = NULL;
9661
9662         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9663         rte_free(filter);
9664
9665         return 0;
9666 }
9667
9668 /*
 * Configure an ethertype filter, which can direct packets by filtering
 * on MAC address and ether_type, or on ether_type only
9671  */
9672 int
9673 i40e_ethertype_filter_set(struct i40e_pf *pf,
9674                         struct rte_eth_ethertype_filter *filter,
9675                         bool add)
9676 {
9677         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9678         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9679         struct i40e_ethertype_filter *ethertype_filter, *node;
9680         struct i40e_ethertype_filter check_filter;
9681         struct i40e_control_filter_stats stats;
9682         uint16_t flags = 0;
9683         int ret;
9684
9685         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9686                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9687                 return -EINVAL;
9688         }
9689         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9690                 filter->ether_type == ETHER_TYPE_IPv6) {
9691                 PMD_DRV_LOG(ERR,
9692                         "unsupported ether_type(0x%04x) in control packet filter.",
9693                         filter->ether_type);
9694                 return -EINVAL;
9695         }
9696         if (filter->ether_type == ETHER_TYPE_VLAN)
9697                 PMD_DRV_LOG(WARNING,
                "filtering on the VLAN ether_type in the first tag is not supported.");
9699
        /* Check if the filter already exists in the SW list */
9701         memset(&check_filter, 0, sizeof(check_filter));
9702         i40e_ethertype_filter_convert(filter, &check_filter);
9703         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9704                                                &check_filter.input);
9705         if (add && node) {
9706                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9707                 return -EINVAL;
9708         }
9709
9710         if (!add && !node) {
9711                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9712                 return -EINVAL;
9713         }
9714
9715         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9716                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9717         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9718                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9719         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9720
9721         memset(&stats, 0, sizeof(stats));
9722         ret = i40e_aq_add_rem_control_packet_filter(hw,
9723                         filter->mac_addr.addr_bytes,
9724                         filter->ether_type, flags,
9725                         pf->main_vsi->seid,
9726                         filter->queue, add, &stats, NULL);
9727
9728         PMD_DRV_LOG(INFO,
9729                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9730                 ret, stats.mac_etype_used, stats.etype_used,
9731                 stats.mac_etype_free, stats.etype_free);
9732         if (ret < 0)
9733                 return -ENOSYS;
9734
9735         /* Add or delete a filter in SW list */
9736         if (add) {
9737                 ethertype_filter = rte_zmalloc("ethertype_filter",
9738                                        sizeof(*ethertype_filter), 0);
9739                 if (ethertype_filter == NULL) {
9740                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9741                         return -ENOMEM;
9742                 }
9743
9744                 rte_memcpy(ethertype_filter, &check_filter,
9745                            sizeof(check_filter));
9746                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9747                 if (ret < 0)
9748                         rte_free(ethertype_filter);
9749         } else {
9750                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9751         }
9752
9753         return ret;
9754 }
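
/*
 * Illustrative sketch (application side, not part of the driver):
 * steering all LLDP frames (ether_type 0x88CC) on port 0 to RX queue 1
 * through the generic filter API; error handling is omitted.
 *
 *	struct rte_eth_ethertype_filter etf = {
 *		.ether_type = 0x88CC,
 *		.queue = 1,
 *	};
 *
 * Leaving .flags at 0 matches on ether_type only (the MAC address is
 * ignored); adding RTE_ETHTYPE_FLAGS_DROP would drop the frames instead
 * of steering them.
 *
 *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
 *				RTE_ETH_FILTER_ADD, &etf);
 */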
9755
9756 /*
9757  * Handle operations for ethertype filter.
9758  */
9759 static int
9760 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9761                                 enum rte_filter_op filter_op,
9762                                 void *arg)
9763 {
9764         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9765         int ret = 0;
9766
9767         if (filter_op == RTE_ETH_FILTER_NOP)
9768                 return ret;
9769
9770         if (arg == NULL) {
9771                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9772                             filter_op);
9773                 return -EINVAL;
9774         }
9775
9776         switch (filter_op) {
9777         case RTE_ETH_FILTER_ADD:
9778                 ret = i40e_ethertype_filter_set(pf,
9779                         (struct rte_eth_ethertype_filter *)arg,
9780                         TRUE);
9781                 break;
9782         case RTE_ETH_FILTER_DELETE:
9783                 ret = i40e_ethertype_filter_set(pf,
9784                         (struct rte_eth_ethertype_filter *)arg,
9785                         FALSE);
9786                 break;
9787         default:
9788                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9789                 ret = -ENOSYS;
9790                 break;
9791         }
9792         return ret;
9793 }
9794
9795 static int
9796 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9797                      enum rte_filter_type filter_type,
9798                      enum rte_filter_op filter_op,
9799                      void *arg)
9800 {
9801         int ret = 0;
9802
9803         if (dev == NULL)
9804                 return -EINVAL;
9805
9806         switch (filter_type) {
9807         case RTE_ETH_FILTER_NONE:
9808                 /* For global configuration */
9809                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9810                 break;
9811         case RTE_ETH_FILTER_HASH:
9812                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9813                 break;
9814         case RTE_ETH_FILTER_MACVLAN:
9815                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9816                 break;
9817         case RTE_ETH_FILTER_ETHERTYPE:
9818                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9819                 break;
9820         case RTE_ETH_FILTER_TUNNEL:
9821                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9822                 break;
9823         case RTE_ETH_FILTER_FDIR:
9824                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9825                 break;
9826         case RTE_ETH_FILTER_GENERIC:
9827                 if (filter_op != RTE_ETH_FILTER_GET)
9828                         return -EINVAL;
9829                 *(const void **)arg = &i40e_flow_ops;
9830                 break;
9831         default:
9832                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9833                                                         filter_type);
9834                 ret = -EINVAL;
9835                 break;
9836         }
9837
9838         return ret;
9839 }
9840
9841 /*
9842  * Check and enable Extended Tag.
9843  * Enabling Extended Tag is important for 40G performance.
9844  */
9845 static void
9846 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9847 {
9848         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9849         uint32_t buf = 0;
9850         int ret;
9851
9852         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9853                                       PCI_DEV_CAP_REG);
9854         if (ret < 0) {
9855                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9856                             PCI_DEV_CAP_REG);
9857                 return;
9858         }
9859         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9860                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9861                 return;
9862         }
9863
9864         buf = 0;
9865         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9866                                       PCI_DEV_CTRL_REG);
9867         if (ret < 0) {
9868                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9869                             PCI_DEV_CTRL_REG);
9870                 return;
9871         }
9872         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9873                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9874                 return;
9875         }
9876         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9877         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9878                                        PCI_DEV_CTRL_REG);
9879         if (ret < 0) {
9880                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9881                             PCI_DEV_CTRL_REG);
9882                 return;
9883         }
9884 }
9885
9886 /*
 * As some registers are only reset by a global hardware reset,
 * explicit hardware initialization is needed to put those registers
 * into an expected initial state.
9890  */
9891 static void
9892 i40e_hw_init(struct rte_eth_dev *dev)
9893 {
9894         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9895
9896         i40e_enable_extended_tag(dev);
9897
9898         /* clear the PF Queue Filter control register */
9899         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9900
9901         /* Disable symmetric hash per port */
9902         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9903 }
9904
9905 /*
 * On X722 it is possible to have multiple pctypes mapped to the same
 * flowtype; however, this function returns only the single highest pctype
 * index, which is not quite correct. This is a known problem of the i40e
 * driver and needs to be fixed later.
9910  */
9911 enum i40e_filter_pctype
9912 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9913 {
9914         int i;
9915         uint64_t pctype_mask;
9916
9917         if (flow_type < I40E_FLOW_TYPE_MAX) {
9918                 pctype_mask = adapter->pctypes_tbl[flow_type];
9919                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9920                         if (pctype_mask & (1ULL << i))
9921                                 return (enum i40e_filter_pctype)i;
9922                 }
9923         }
9924         return I40E_FILTER_PCTYPE_INVALID;
9925 }
9926
9927 uint16_t
9928 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9929                         enum i40e_filter_pctype pctype)
9930 {
9931         uint16_t flowtype;
9932         uint64_t pctype_mask = 1ULL << pctype;
9933
9934         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9935              flowtype++) {
9936                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9937                         return flowtype;
9938         }
9939
9940         return RTE_ETH_FLOW_UNKNOWN;
9941 }
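
/*
 * Illustrative example: pctypes_tbl[] is indexed by flow type and holds a
 * bitmap of hardware pctypes. If
 * adapter->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] has bit
 * I40E_FILTER_PCTYPE_NONF_IPV4_UDP set, then
 * i40e_pctype_to_flowtype(adapter, I40E_FILTER_PCTYPE_NONF_IPV4_UDP)
 * returns RTE_ETH_FLOW_NONFRAG_IPV4_UDP, while the reverse lookup through
 * i40e_flowtype_to_pctype() returns the highest pctype bit set for that
 * flow type.
 */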
9942
9943 /*
 * On X710, performance numbers are far below expectations on recent firmware
 * versions. On XL710, the same is true when promiscuous mode is disabled, or
 * when promiscuous mode is enabled and the port MAC address is equal to the
 * packet destination MAC address. The fix for this issue may not be
 * integrated in the following firmware version, so a workaround in the
 * software driver is needed: it modifies the initial values of 3
 * internal-only registers for both X710 and XL710. Note that the values for
 * X710 and XL710 could be different, and the workaround can be removed when
 * the issue is fixed in firmware in the future.
9953  */
9954
9955 /* For both X710 and XL710 */
9956 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9957 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9958 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9959
9960 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9961 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9962
9963 /* For X722 */
9964 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9965 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9966
9967 /* For X710 */
9968 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9969 /* For XL710 */
9970 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9971 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9972
9973 static int
9974 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9975 {
9976         enum i40e_status_code status;
9977         struct i40e_aq_get_phy_abilities_resp phy_ab;
9978         int ret = -ENOTSUP;
9979         int retries = 0;
9980
9981         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9982                                               NULL);
9983
9984         while (status) {
9985                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9986                         status);
9987                 retries++;
9988                 rte_delay_us(100000);
                if (retries < 5)
9990                         status = i40e_aq_get_phy_capabilities(hw, false,
9991                                         true, &phy_ab, NULL);
9992                 else
9993                         return ret;
9994         }
9995         return 0;
9996 }
9997
9998 static void
9999 i40e_configure_registers(struct i40e_hw *hw)
10000 {
10001         static struct {
10002                 uint32_t addr;
10003                 uint64_t val;
10004         } reg_table[] = {
10005                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10006                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10007                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10008         };
10009         uint64_t reg;
10010         uint32_t i;
10011         int ret;
10012
10013         for (i = 0; i < RTE_DIM(reg_table); i++) {
10014                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10015                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10016                                 reg_table[i].val =
10017                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10018                         else /* For X710/XL710/XXV710 */
10019                                 if (hw->aq.fw_maj_ver < 6)
10020                                         reg_table[i].val =
10021                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10022                                 else
10023                                         reg_table[i].val =
10024                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10025                 }
10026
10027                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10028                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10029                                 reg_table[i].val =
10030                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10031                         else /* For X710/XL710/XXV710 */
10032                                 reg_table[i].val =
10033                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10034                 }
10035
10036                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10037                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
10038                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
10039                                 reg_table[i].val =
10040                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
10041                         else /* For X710 */
10042                                 reg_table[i].val =
10043                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
10044                 }
10045
10046                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10047                                                         &reg, NULL);
10048                 if (ret < 0) {
10049                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10050                                                         reg_table[i].addr);
10051                         break;
10052                 }
10053                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10054                                                 reg_table[i].addr, reg);
10055                 if (reg == reg_table[i].val)
10056                         continue;
10057
10058                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10059                                                 reg_table[i].val, NULL);
10060                 if (ret < 0) {
10061                         PMD_DRV_LOG(ERR,
10062                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10063                                 reg_table[i].val, reg_table[i].addr);
10064                         break;
10065                 }
10066                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10067                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10068         }
10069 }
10070
10071 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
10072 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10073 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10074 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10075 static int
10076 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10077 {
10078         uint32_t reg;
10079         int ret;
10080
10081         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10082                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10083                 return -EINVAL;
10084         }
10085
10086         /* Configure for double VLAN RX stripping */
10087         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10088         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10089                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10090                 ret = i40e_aq_debug_write_register(hw,
10091                                                    I40E_VSI_TSR(vsi->vsi_id),
10092                                                    reg, NULL);
10093                 if (ret < 0) {
10094                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10095                                     vsi->vsi_id);
10096                         return I40E_ERR_CONFIG;
10097                 }
10098         }
10099
10100         /* Configure for double VLAN TX insertion */
10101         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10102         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10103                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10104                 ret = i40e_aq_debug_write_register(hw,
10105                                                    I40E_VSI_L2TAGSTXVALID(
10106                                                    vsi->vsi_id), reg, NULL);
10107                 if (ret < 0) {
10108                         PMD_DRV_LOG(ERR,
10109                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10110                                 vsi->vsi_id);
10111                         return I40E_ERR_CONFIG;
10112                 }
10113         }
10114
10115         return 0;
10116 }
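
/*
 * Illustrative note: the TSR and L2TAGSTXVALID registers above are laid
 * out as per-VSI arrays with 4-byte stride. For example, for vsi_id 5:
 *
 *	I40E_VSI_TSR(5)           == 0x00050800 + 5 * 4 == 0x00050814
 *	I40E_VSI_L2TAGSTXVALID(5) == 0x00042800 + 5 * 4 == 0x00042814
 */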
10117
10118 /**
10119  * i40e_aq_add_mirror_rule
10120  * @hw: pointer to the hardware structure
10121  * @seid: VEB seid to add mirror rule to
10122  * @dst_id: destination vsi seid
 * @rule_type: type of the mirror rule to be added
 * @entries: buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be added
 *
 * Add a mirror rule for a given VEB.
10128  *
10129  **/
10130 static enum i40e_status_code
10131 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10132                         uint16_t seid, uint16_t dst_id,
10133                         uint16_t rule_type, uint16_t *entries,
10134                         uint16_t count, uint16_t *rule_id)
10135 {
10136         struct i40e_aq_desc desc;
10137         struct i40e_aqc_add_delete_mirror_rule cmd;
10138         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10139                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10140                 &desc.params.raw;
10141         uint16_t buff_len;
10142         enum i40e_status_code status;
10143
10144         i40e_fill_default_direct_cmd_desc(&desc,
10145                                           i40e_aqc_opc_add_mirror_rule);
10146         memset(&cmd, 0, sizeof(cmd));
10147
10148         buff_len = sizeof(uint16_t) * count;
10149         desc.datalen = rte_cpu_to_le_16(buff_len);
10150         if (buff_len > 0)
10151                 desc.flags |= rte_cpu_to_le_16(
10152                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10153         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10154                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10155         cmd.num_entries = rte_cpu_to_le_16(count);
10156         cmd.seid = rte_cpu_to_le_16(seid);
10157         cmd.destination = rte_cpu_to_le_16(dst_id);
10158
10159         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10160         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10161         PMD_DRV_LOG(INFO,
                "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
10163                 hw->aq.asq_last_status, resp->rule_id,
10164                 resp->mirror_rules_used, resp->mirror_rules_free);
10165         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10166
10167         return status;
10168 }
10169
10170 /**
10171  * i40e_aq_del_mirror_rule
10172  * @hw: pointer to the hardware structure
 * @seid: VEB seid to delete the mirror rule from
 * @rule_type: type of the mirror rule to be deleted
 * @entries: buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be deleted
 *
 * Delete a mirror rule for a given VEB.
10179  *
10180  **/
10181 static enum i40e_status_code
10182 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10183                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10184                 uint16_t count, uint16_t rule_id)
10185 {
10186         struct i40e_aq_desc desc;
10187         struct i40e_aqc_add_delete_mirror_rule cmd;
10188         uint16_t buff_len = 0;
10189         enum i40e_status_code status;
10190         void *buff = NULL;
10191
10192         i40e_fill_default_direct_cmd_desc(&desc,
10193                                           i40e_aqc_opc_delete_mirror_rule);
10194         memset(&cmd, 0, sizeof(cmd));
10195         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10196                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10197                                                           I40E_AQ_FLAG_RD));
10198                 cmd.num_entries = count;
10199                 buff_len = sizeof(uint16_t) * count;
10200                 desc.datalen = rte_cpu_to_le_16(buff_len);
10201                 buff = (void *)entries;
        } else {
                /* rule id is filled in destination field for deleting mirror rule */
                cmd.destination = rte_cpu_to_le_16(rule_id);
        }
10205
10206         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10207                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10208         cmd.seid = rte_cpu_to_le_16(seid);
10209
10210         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10211         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10212
10213         return status;
10214 }
10215
10216 /**
10217  * i40e_mirror_rule_set
 * @dev: pointer to the ethernet device structure
10219  * @mirror_conf: mirror rule info
10220  * @sw_id: mirror rule's sw_id
10221  * @on: enable/disable
10222  *
 * Set a mirror rule.
10224  *
10225  **/
10226 static int
10227 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10228                         struct rte_eth_mirror_conf *mirror_conf,
10229                         uint8_t sw_id, uint8_t on)
10230 {
10231         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10232         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10233         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10234         struct i40e_mirror_rule *parent = NULL;
10235         uint16_t seid, dst_seid, rule_id;
10236         uint16_t i, j = 0;
10237         int ret;
10238
10239         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10240
10241         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10242                 PMD_DRV_LOG(ERR,
                        "mirror rule cannot be configured without a VEB or VFs.");
10244                 return -ENOSYS;
10245         }
10246         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10247                 PMD_DRV_LOG(ERR, "mirror table is full.");
10248                 return -ENOSPC;
10249         }
10250         if (mirror_conf->dst_pool > pf->vf_num) {
10251                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10252                                  mirror_conf->dst_pool);
10253                 return -EINVAL;
10254         }
10255
10256         seid = pf->main_vsi->veb->seid;
10257
10258         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10259                 if (sw_id <= it->index) {
10260                         mirr_rule = it;
10261                         break;
10262                 }
10263                 parent = it;
10264         }
10265         if (mirr_rule && sw_id == mirr_rule->index) {
10266                 if (on) {
10267                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10268                         return -EEXIST;
10269                 } else {
10270                         ret = i40e_aq_del_mirror_rule(hw, seid,
10271                                         mirr_rule->rule_type,
10272                                         mirr_rule->entries,
10273                                         mirr_rule->num_entries, mirr_rule->id);
10274                         if (ret < 0) {
10275                                 PMD_DRV_LOG(ERR,
10276                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10277                                         ret, hw->aq.asq_last_status);
10278                                 return -ENOSYS;
10279                         }
10280                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10281                         rte_free(mirr_rule);
10282                         pf->nb_mirror_rule--;
10283                         return 0;
10284                 }
10285         } else if (!on) {
10286                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10287                 return -ENOENT;
10288         }
10289
10290         mirr_rule = rte_zmalloc("i40e_mirror_rule",
                                sizeof(struct i40e_mirror_rule), 0);
10292         if (!mirr_rule) {
10293                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10294                 return I40E_ERR_NO_MEMORY;
10295         }
10296         switch (mirror_conf->rule_type) {
10297         case ETH_MIRROR_VLAN:
10298                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10299                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10300                                 mirr_rule->entries[j] =
10301                                         mirror_conf->vlan.vlan_id[i];
10302                                 j++;
10303                         }
10304                 }
10305                 if (j == 0) {
10306                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10307                         rte_free(mirr_rule);
10308                         return -EINVAL;
10309                 }
10310                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10311                 break;
10312         case ETH_MIRROR_VIRTUAL_POOL_UP:
10313         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10314                 /* check if the specified pool bit is out of range */
10315                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10316                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10317                         rte_free(mirr_rule);
10318                         return -EINVAL;
10319                 }
10320                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10321                         if (mirror_conf->pool_mask & (1ULL << i)) {
10322                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10323                                 j++;
10324                         }
10325                 }
10326                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10327                         /* add pf vsi to entries */
10328                         mirr_rule->entries[j] = pf->main_vsi_seid;
10329                         j++;
10330                 }
10331                 if (j == 0) {
10332                         PMD_DRV_LOG(ERR, "pool is not specified.");
10333                         rte_free(mirr_rule);
10334                         return -EINVAL;
10335                 }
                /* in AQ commands, egress and ingress are relative to the switch, not the port */
10337                 mirr_rule->rule_type =
10338                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10339                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10340                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10341                 break;
10342         case ETH_MIRROR_UPLINK_PORT:
                /* in AQ commands, egress and ingress are relative to the switch, not the port */
10344                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10345                 break;
10346         case ETH_MIRROR_DOWNLINK_PORT:
10347                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10348                 break;
10349         default:
10350                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10351                         mirror_conf->rule_type);
10352                 rte_free(mirr_rule);
10353                 return -EINVAL;
10354         }
10355
10356         /* If dst_pool equals vf_num, treat the destination as the PF */
10357         if (mirror_conf->dst_pool == pf->vf_num)
10358                 dst_seid = pf->main_vsi_seid;
10359         else
10360                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10361
10362         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10363                                       mirr_rule->rule_type, mirr_rule->entries,
10364                                       j, &rule_id);
10365         if (ret < 0) {
10366                 PMD_DRV_LOG(ERR,
10367                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10368                         ret, hw->aq.asq_last_status);
10369                 rte_free(mirr_rule);
10370                 return -ENOSYS;
10371         }
10372
10373         mirr_rule->index = sw_id;
10374         mirr_rule->num_entries = j;
10375         mirr_rule->id = rule_id;
10376         mirr_rule->dst_vsi_seid = dst_seid;
10377
10378         if (parent)
10379                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10380         else
10381                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10382
10383         pf->nb_mirror_rule++;
10384         return 0;
10385 }
10386
10387 /**
10388  * i40e_mirror_rule_reset
10389  * @dev: pointer to the device
10390  * @sw_id: mirror rule's sw_id
10391  *
10392  * reset a mirror rule.
10393  *
10394  **/
10395 static int
10396 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10397 {
10398         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10399         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10400         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10401         uint16_t seid;
10402         int ret;
10403
10404         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10405
10406         seid = pf->main_vsi->veb->seid;
10407
10408         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10409                 if (sw_id == it->index) {
10410                         mirr_rule = it;
10411                         break;
10412                 }
10413         }
10414         if (mirr_rule) {
10415                 ret = i40e_aq_del_mirror_rule(hw, seid,
10416                                 mirr_rule->rule_type,
10417                                 mirr_rule->entries,
10418                                 mirr_rule->num_entries, mirr_rule->id);
10419                 if (ret < 0) {
10420                         PMD_DRV_LOG(ERR,
10421                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10422                                 ret, hw->aq.asq_last_status);
10423                         return -ENOSYS;
10424                 }
10425                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10426                 rte_free(mirr_rule);
10427                 pf->nb_mirror_rule--;
10428         } else {
10429                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10430                 return -ENOENT;
10431         }
10432         return 0;
10433 }
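/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * normally reach the two mirror functions above through the generic ethdev
 * mirroring API. The port id, sw_id and VLAN value below are assumptions
 * made for the example.
 *
 *     struct rte_eth_mirror_conf conf = {
 *             .rule_type = ETH_MIRROR_VLAN,
 *             .dst_pool = 0,
 *             .vlan = { .vlan_mask = 0x1, .vlan_id = { 100 } },
 *     };
 *     int ret = rte_eth_mirror_rule_set(0, &conf, 1, 1);
 *     if (ret == 0)
 *             ret = rte_eth_mirror_rule_reset(0, 1);
 */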
10434
10435 static uint64_t
10436 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10437 {
10438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10439         uint64_t systim_cycles;
10440
10441         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10442         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10443                         << 32;
10444
10445         return systim_cycles;
10446 }
10447
10448 static uint64_t
10449 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10450 {
10451         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10452         uint64_t rx_tstamp;
10453
10454         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10455         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10456                         << 32;
10457
10458         return rx_tstamp;
10459 }
10460
10461 static uint64_t
10462 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10463 {
10464         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10465         uint64_t tx_tstamp;
10466
10467         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10468         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10469                         << 32;
10470
10471         return tx_tstamp;
10472 }
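/*
 * The three readers above share one pattern: read the low 32-bit half of
 * the timestamp register pair, then merge in the high half. A minimal
 * sketch of that composition (the helper name is illustrative only):
 *
 *     static inline uint64_t
 *     i40e_merge_reg_halves(uint32_t lo, uint32_t hi)
 *     {
 *             return ((uint64_t)hi << 32) | lo;
 *     }
 */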
10473
10474 static void
10475 i40e_start_timecounters(struct rte_eth_dev *dev)
10476 {
10477         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10478         struct i40e_adapter *adapter =
10479                         (struct i40e_adapter *)dev->data->dev_private;
10480         struct rte_eth_link link;
10481         uint32_t tsync_inc_l;
10482         uint32_t tsync_inc_h;
10483
10484         /* Get current link speed. */
10485         i40e_dev_link_update(dev, 1);
10486         rte_eth_linkstatus_get(dev, &link);
10487
10488         switch (link.link_speed) {
10489         case ETH_SPEED_NUM_40G:
10490                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10491                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10492                 break;
10493         case ETH_SPEED_NUM_10G:
10494                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10495                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10496                 break;
10497         case ETH_SPEED_NUM_1G:
10498                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10499                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10500                 break;
10501         default:
10502                 tsync_inc_l = 0x0;
10503                 tsync_inc_h = 0x0;
10504         }
10505
10506         /* Set the timesync increment value. */
10507         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10508         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10509
10510         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10511         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10512         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10513
10514         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10515         adapter->systime_tc.cc_shift = 0;
10516         adapter->systime_tc.nsec_mask = 0;
10517
10518         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10519         adapter->rx_tstamp_tc.cc_shift = 0;
10520         adapter->rx_tstamp_tc.nsec_mask = 0;
10521
10522         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10523         adapter->tx_tstamp_tc.cc_shift = 0;
10524         adapter->tx_tstamp_tc.nsec_mask = 0;
10525 }
10526
10527 static int
10528 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10529 {
10530         struct i40e_adapter *adapter =
10531                         (struct i40e_adapter *)dev->data->dev_private;
10532
10533         adapter->systime_tc.nsec += delta;
10534         adapter->rx_tstamp_tc.nsec += delta;
10535         adapter->tx_tstamp_tc.nsec += delta;
10536
10537         return 0;
10538 }
10539
10540 static int
10541 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10542 {
10543         uint64_t ns;
10544         struct i40e_adapter *adapter =
10545                         (struct i40e_adapter *)dev->data->dev_private;
10546
10547         ns = rte_timespec_to_ns(ts);
10548
10549         /* Set the timecounters to a new value. */
10550         adapter->systime_tc.nsec = ns;
10551         adapter->rx_tstamp_tc.nsec = ns;
10552         adapter->tx_tstamp_tc.nsec = ns;
10553
10554         return 0;
10555 }
10556
10557 static int
10558 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10559 {
10560         uint64_t ns, systime_cycles;
10561         struct i40e_adapter *adapter =
10562                         (struct i40e_adapter *)dev->data->dev_private;
10563
10564         systime_cycles = i40e_read_systime_cyclecounter(dev);
10565         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10566         *ts = rte_ns_to_timespec(ns);
10567
10568         return 0;
10569 }
10570
10571 static int
10572 i40e_timesync_enable(struct rte_eth_dev *dev)
10573 {
10574         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10575         uint32_t tsync_ctl_l;
10576         uint32_t tsync_ctl_h;
10577
10578         /* Stop the timesync system time. */
10579         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10580         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10581         /* Reset the timesync system time value. */
10582         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10583         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10584
10585         i40e_start_timecounters(dev);
10586
10587         /* Clear timesync registers. */
10588         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10589         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10590         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10591         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10592         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10593         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10594
10595         /* Enable timestamping of PTP packets. */
10596         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10597         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10598
10599         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10600         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10601         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10602
10603         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10604         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10605
10606         return 0;
10607 }
10608
10609 static int
10610 i40e_timesync_disable(struct rte_eth_dev *dev)
10611 {
10612         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10613         uint32_t tsync_ctl_l;
10614         uint32_t tsync_ctl_h;
10615
10616         /* Disable timestamping of transmitted PTP packets. */
10617         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10618         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10619
10620         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10621         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10622
10623         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10624         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10625
10626         /* Reset the timesync increment value. */
10627         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10628         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10629
10630         return 0;
10631 }
10632
10633 static int
10634 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10635                                 struct timespec *timestamp, uint32_t flags)
10636 {
10637         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10638         struct i40e_adapter *adapter =
10639                 (struct i40e_adapter *)dev->data->dev_private;
10640
10641         uint32_t sync_status;
10642         uint32_t index = flags & 0x03;
10643         uint64_t rx_tstamp_cycles;
10644         uint64_t ns;
10645
10646         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10647         if ((sync_status & (1 << index)) == 0)
10648                 return -EINVAL;
10649
10650         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10651         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10652         *timestamp = rte_ns_to_timespec(ns);
10653
10654         return 0;
10655 }
10656
10657 static int
10658 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10659                                 struct timespec *timestamp)
10660 {
10661         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10662         struct i40e_adapter *adapter =
10663                 (struct i40e_adapter *)dev->data->dev_private;
10664
10665         uint32_t sync_status;
10666         uint64_t tx_tstamp_cycles;
10667         uint64_t ns;
10668
10669         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10670         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10671                 return -EINVAL;
10672
10673         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10674         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10675         *timestamp = rte_ns_to_timespec(ns);
10676
10677         return 0;
10678 }
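/*
 * Usage sketch (illustrative only): the timesync callbacks above are
 * exposed through the generic ethdev API; a PTP-style application would
 * typically do something like:
 *
 *     struct timespec ts;
 *     rte_eth_timesync_enable(port_id);
 *     rte_eth_timesync_read_time(port_id, &ts);
 *     rte_eth_timesync_adjust_time(port_id, 1000);    advance by 1 us
 *     rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *     rte_eth_timesync_disable(port_id);
 */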
10679
10680 /*
10681  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10682  * @dev: the device being configured
10683  * @dcb_cfg: pointer to the parsed configuration
10684  * @tc_map: bit map of enabled traffic classes
10685  *
10686  * Returns 0 on success, negative value on failure
10687  */
10688 static int
10689 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10690                          struct i40e_dcbx_config *dcb_cfg,
10691                          uint8_t *tc_map)
10692 {
10693         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10694         uint8_t i, tc_bw, bw_lf;
10695
10696         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10697
10698         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10699         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10700                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10701                 return -EINVAL;
10702         }
10703
10704         /* assume each tc has the same bw */
10705         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10706         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10707                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10708         /* distribute the remainder so the TC bandwidths sum to 100 */
10709         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10710         for (i = 0; i < bw_lf; i++)
10711                 dcb_cfg->etscfg.tcbwtable[i]++;
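        /*
         * Worked example (illustrative): with nb_tcs = 3, tc_bw is
         * 100 / 3 = 33 and bw_lf is 100 % 3 = 1, so the resulting
         * table is {34, 33, 33}, which sums to 100 as required.
         */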
10712
10713         /* assume each tc has the same Transmission Selection Algorithm */
10714         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10715                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10716
10717         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10718                 dcb_cfg->etscfg.prioritytable[i] =
10719                                 dcb_rx_conf->dcb_tc[i];
10720
10721         /* FW needs one App to configure HW */
10722         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10723         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10724         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10725         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10726
10727         if (dcb_rx_conf->nb_tcs == 0)
10728                 *tc_map = 1; /* tc0 only */
10729         else
10730                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10731
10732         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10733                 dcb_cfg->pfc.willing = 0;
10734                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10735                 dcb_cfg->pfc.pfcenable = *tc_map;
10736         }
10737         return 0;
10738 }
10739
10740
10741 static enum i40e_status_code
10742 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10743                               struct i40e_aqc_vsi_properties_data *info,
10744                               uint8_t enabled_tcmap)
10745 {
10746         enum i40e_status_code ret;
10747         int i, total_tc = 0;
10748         uint16_t qpnum_per_tc, bsf, qp_idx;
10749         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10750         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10751         uint16_t used_queues;
10752
10753         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10754         if (ret != I40E_SUCCESS)
10755                 return ret;
10756
10757         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10758                 if (enabled_tcmap & (1 << i))
10759                         total_tc++;
10760         }
10761         if (total_tc == 0)
10762                 total_tc = 1;
10763         vsi->enabled_tc = enabled_tcmap;
10764
10765         /* Different VSI types have different numbers of queues assigned */
10766         if (vsi->type == I40E_VSI_MAIN)
10767                 used_queues = dev_data->nb_rx_queues -
10768                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10769         else if (vsi->type == I40E_VSI_VMDQ2)
10770                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10771         else {
10772                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10773                 return I40E_ERR_NO_AVAILABLE_VSI;
10774         }
10775
10776         /* Number of queues per enabled TC */
10777         qpnum_per_tc = used_queues / total_tc;
10778         if (qpnum_per_tc == 0) {
10779                 PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
10780                 return I40E_ERR_INVALID_QP_ID;
10781         }
10782         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10783                                 I40E_MAX_Q_PER_TC);
10784         bsf = rte_bsf32(qpnum_per_tc);
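        /*
         * Worked example (illustrative): with used_queues = 8 and
         * total_tc = 2, qpnum_per_tc is 4 and bsf = rte_bsf32(4) = 2,
         * so TC0 encodes queue offset 0 and TC1 queue offset 4, each
         * advertising 1 << 2 = 4 queue pairs in tc_mapping below.
         */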
10785
10786         /**
10787          * Configure TC and queue mapping parameters. For each enabled TC,
10788          * allocate qpnum_per_tc queue pairs; disabled TCs are served by
10789          * the default queue.
10790          */
10791         qp_idx = 0;
10792         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10793                 if (vsi->enabled_tc & (1 << i)) {
10794                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10795                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10796                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10797                         qp_idx += qpnum_per_tc;
10798                 } else
10799                         info->tc_mapping[i] = 0;
10800         }
10801
10802         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10803         if (vsi->type == I40E_VSI_SRIOV) {
10804                 info->mapping_flags |=
10805                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10806                 for (i = 0; i < vsi->nb_qps; i++)
10807                         info->queue_mapping[i] =
10808                                 rte_cpu_to_le_16(vsi->base_queue + i);
10809         } else {
10810                 info->mapping_flags |=
10811                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10812                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10813         }
10814         info->valid_sections |=
10815                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10816
10817         return I40E_SUCCESS;
10818 }
10819
10820 /*
10821  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10822  * @veb: VEB to be configured
10823  * @tc_map: enabled TC bitmap
10824  *
10825  * Returns 0 on success, negative value on failure
10826  */
10827 static enum i40e_status_code
10828 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10829 {
10830         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10831         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10832         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10833         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10834         enum i40e_status_code ret = I40E_SUCCESS;
10835         int i;
10836         uint32_t bw_max;
10837
10838         /* Nothing to do if the enabled TC map is unchanged */
10839         if (veb->enabled_tc == tc_map)
10840                 return ret;
10841
10842         /* configure tc bandwidth */
10843         memset(&veb_bw, 0, sizeof(veb_bw));
10844         veb_bw.tc_valid_bits = tc_map;
10845         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10846         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10847                 if (tc_map & BIT_ULL(i))
10848                         veb_bw.tc_bw_share_credits[i] = 1;
10849         }
10850         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10851                                                    &veb_bw, NULL);
10852         if (ret) {
10853                 PMD_INIT_LOG(ERR,
10854                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10855                         hw->aq.asq_last_status);
10856                 return ret;
10857         }
10858
10859         memset(&ets_query, 0, sizeof(ets_query));
10860         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10861                                                    &ets_query, NULL);
10862         if (ret != I40E_SUCCESS) {
10863                 PMD_DRV_LOG(ERR,
10864                         "Failed to get switch_comp ETS configuration %u",
10865                         hw->aq.asq_last_status);
10866                 return ret;
10867         }
10868         memset(&bw_query, 0, sizeof(bw_query));
10869         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10870                                                   &bw_query, NULL);
10871         if (ret != I40E_SUCCESS) {
10872                 PMD_DRV_LOG(ERR,
10873                         "Failed to get switch_comp bandwidth configuration %u",
10874                         hw->aq.asq_last_status);
10875                 return ret;
10876         }
10877
10878         /* store and print out BW info */
10879         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10880         veb->bw_info.bw_max = ets_query.tc_bw_max;
10881         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10882         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10883         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10884                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10885                      I40E_16_BIT_WIDTH);
10886         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10887                 veb->bw_info.bw_ets_share_credits[i] =
10888                                 bw_query.tc_bw_share_credits[i];
10889                 veb->bw_info.bw_ets_credits[i] =
10890                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10891                 /* 4 bits per TC, 4th bit is reserved */
10892                 veb->bw_info.bw_ets_max[i] =
10893                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10894                                   RTE_LEN2MASK(3, uint8_t));
10895                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10896                             veb->bw_info.bw_ets_share_credits[i]);
10897                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10898                             veb->bw_info.bw_ets_credits[i]);
10899                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10900                             veb->bw_info.bw_ets_max[i]);
10901         }
10902
10903         veb->enabled_tc = tc_map;
10904
10905         return ret;
10906 }
10907
10908
10909 /*
10910  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10911  * @vsi: VSI to be configured
10912  * @tc_map: enabled TC bitmap
10913  *
10914  * Returns 0 on success, negative value on failure
10915  */
10916 static enum i40e_status_code
10917 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10918 {
10919         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10920         struct i40e_vsi_context ctxt;
10921         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10922         enum i40e_status_code ret = I40E_SUCCESS;
10923         int i;
10924
10925         /* Nothing to do if the enabled TC map is unchanged */
10926         if (vsi->enabled_tc == tc_map)
10927                 return ret;
10928
10929         /* configure tc bandwidth */
10930         memset(&bw_data, 0, sizeof(bw_data));
10931         bw_data.tc_valid_bits = tc_map;
10932         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10933         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10934                 if (tc_map & BIT_ULL(i))
10935                         bw_data.tc_bw_credits[i] = 1;
10936         }
10937         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10938         if (ret) {
10939                 PMD_INIT_LOG(ERR,
10940                         "AQ command Config VSI BW allocation per TC failed = %d",
10941                         hw->aq.asq_last_status);
10942                 goto out;
10943         }
10944         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10945                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10946
10947         /* Update Queue Pairs Mapping for currently enabled UPs */
10948         ctxt.seid = vsi->seid;
10949         ctxt.pf_num = hw->pf_id;
10950         ctxt.vf_num = 0;
10951         ctxt.uplink_seid = vsi->uplink_seid;
10952         ctxt.info = vsi->info;
10953         i40e_get_cap(hw);
10954         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10955         if (ret)
10956                 goto out;
10957
10958         /* Update the VSI after updating the VSI queue-mapping information */
10959         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10960         if (ret) {
10961                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10962                         hw->aq.asq_last_status);
10963                 goto out;
10964         }
10965         /* update the local VSI info with updated queue map */
10966         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10967                                         sizeof(vsi->info.tc_mapping));
10968         rte_memcpy(&vsi->info.queue_mapping,
10969                         &ctxt.info.queue_mapping,
10970                 sizeof(vsi->info.queue_mapping));
10971         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10972         vsi->info.valid_sections = 0;
10973
10974         /* query and update current VSI BW information */
10975         ret = i40e_vsi_get_bw_config(vsi);
10976         if (ret) {
10977                 PMD_INIT_LOG(ERR,
10978                          "Failed updating vsi bw info, err %s aq_err %s",
10979                          i40e_stat_str(hw, ret),
10980                          i40e_aq_str(hw, hw->aq.asq_last_status));
10981                 goto out;
10982         }
10983
10984         vsi->enabled_tc = tc_map;
10985
10986 out:
10987         return ret;
10988 }
10989
10990 /*
10991  * i40e_dcb_hw_configure - program the dcb setting to hw
10992  * @pf: pf the configuration is taken on
10993  * @new_cfg: new configuration
10994  * @tc_map: enabled TC bitmap
10995  *
10996  * Returns 0 on success, negative value on failure
10997  */
10998 static enum i40e_status_code
10999 i40e_dcb_hw_configure(struct i40e_pf *pf,
11000                       struct i40e_dcbx_config *new_cfg,
11001                       uint8_t tc_map)
11002 {
11003         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11004         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11005         struct i40e_vsi *main_vsi = pf->main_vsi;
11006         struct i40e_vsi_list *vsi_list;
11007         enum i40e_status_code ret;
11008         int i;
11009         uint32_t val;
11010
11011         /* The FW LLDP API requires FW version 4.4 or newer */
11012         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11013               (hw->aq.fw_maj_ver >= 5))) {
11014                 PMD_INIT_LOG(ERR,
11015                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
11016                 return I40E_ERR_FIRMWARE_API_VERSION;
11017         }
11018
11019         /* Check whether reconfiguration is needed */
11020         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11021                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
11022                 return I40E_SUCCESS;
11023         }
11024
11025         /* Copy the new config to the current config */
11026         *old_cfg = *new_cfg;
11027         old_cfg->etsrec = old_cfg->etscfg;
11028         ret = i40e_set_dcb_config(hw);
11029         if (ret) {
11030                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11031                          i40e_stat_str(hw, ret),
11032                          i40e_aq_str(hw, hw->aq.asq_last_status));
11033                 return ret;
11034         }
11035         /* set receive Arbiter to RR mode and ETS scheme by default */
11036         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11037                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11038                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11039                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11040                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11041                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11042                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11043                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11044                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11045                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11046                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11047                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11048                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11049         }
11050         /* get local mib to check whether it is configured correctly */
11051         /* IEEE mode */
11052         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11053         /* Get Local DCB Config */
11054         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11055                                      &hw->local_dcbx_config);
11056
11057         /* If a VEB has been created, its TC configuration must be updated first */
11058         if (main_vsi->veb) {
11059                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11060                 if (ret)
11061                         PMD_INIT_LOG(WARNING,
11062                                  "Failed configuring TC for VEB seid=%d",
11063                                  main_vsi->veb->seid);
11064         }
11065         /* Update each VSI */
11066         i40e_vsi_config_tc(main_vsi, tc_map);
11067         if (main_vsi->veb) {
11068                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11069                         /* Besides the main VSI and VMDQ VSIs, enable only
11070                          * the default TC for other VSIs
11071                          */
11072                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11073                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11074                                                          tc_map);
11075                         else
11076                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11077                                                          I40E_DEFAULT_TCMAP);
11078                         if (ret)
11079                                 PMD_INIT_LOG(WARNING,
11080                                         "Failed configuring TC for VSI seid=%d",
11081                                         vsi_list->vsi->seid);
11082                         /* continue */
11083                 }
11084         }
11085         return I40E_SUCCESS;
11086 }
11087
11088 /*
11089  * i40e_dcb_init_configure - initial dcb config
11090  * @dev: device being configured
11091  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11092  *
11093  * Returns 0 on success, negative value on failure
11094  */
11095 int
11096 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11097 {
11098         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11099         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11100         int i, ret = 0;
11101
11102         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11103                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11104                 return -ENOTSUP;
11105         }
11106
11107         /* DCB initialization:
11108          * Update DCB configuration from the Firmware and configure
11109          * LLDP MIB change event.
11110          */
11111         if (sw_dcb == TRUE) {
11112                 ret = i40e_init_dcb(hw);
11113                 /* If the LLDP agent is stopped, i40e_init_dcb is expected
11114                  * to fail with adminq status I40E_AQ_RC_EPERM.
11115                  * Otherwise, it should return success.
11116                  */
11117                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11118                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11119                         memset(&hw->local_dcbx_config, 0,
11120                                 sizeof(struct i40e_dcbx_config));
11121                         /* set dcb default configuration */
11122                         hw->local_dcbx_config.etscfg.willing = 0;
11123                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11124                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11125                         hw->local_dcbx_config.etscfg.tsatable[0] =
11126                                                 I40E_IEEE_TSA_ETS;
11127                         /* all UPs mapping to TC0 */
11128                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11129                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11130                         hw->local_dcbx_config.etsrec =
11131                                 hw->local_dcbx_config.etscfg;
11132                         hw->local_dcbx_config.pfc.willing = 0;
11133                         hw->local_dcbx_config.pfc.pfccap =
11134                                                 I40E_MAX_TRAFFIC_CLASS;
11135                         /* FW needs one App to configure HW */
11136                         hw->local_dcbx_config.numapps = 1;
11137                         hw->local_dcbx_config.app[0].selector =
11138                                                 I40E_APP_SEL_ETHTYPE;
11139                         hw->local_dcbx_config.app[0].priority = 3;
11140                         hw->local_dcbx_config.app[0].protocolid =
11141                                                 I40E_APP_PROTOID_FCOE;
11142                         ret = i40e_set_dcb_config(hw);
11143                         if (ret) {
11144                                 PMD_INIT_LOG(ERR,
11145                                         "default dcb config failed. err = %d, aq_err = %d.",
11146                                         ret, hw->aq.asq_last_status);
11147                                 return -ENOSYS;
11148                         }
11149                 } else {
11150                         PMD_INIT_LOG(ERR,
11151                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
11152                                 ret, hw->aq.asq_last_status);
11153                         return -ENOTSUP;
11154                 }
11155         } else {
11156                 ret = i40e_aq_start_lldp(hw, NULL);
11157                 if (ret != I40E_SUCCESS)
11158                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11159
11160                 ret = i40e_init_dcb(hw);
11161                 if (!ret) {
11162                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11163                                 PMD_INIT_LOG(ERR,
11164                                         "HW doesn't support DCBX offload.");
11165                                 return -ENOTSUP;
11166                         }
11167                 } else {
11168                         PMD_INIT_LOG(ERR,
11169                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11170                                 ret, hw->aq.asq_last_status);
11171                         return -ENOTSUP;
11172                 }
11173         }
11174         return 0;
11175 }
11176
11177 /*
11178  * i40e_dcb_setup - setup dcb related config
11179  * @dev: device being configured
11180  *
11181  * Returns 0 on success, negative value on failure
11182  */
11183 static int
11184 i40e_dcb_setup(struct rte_eth_dev *dev)
11185 {
11186         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11187         struct i40e_dcbx_config dcb_cfg;
11188         uint8_t tc_map = 0;
11189         int ret = 0;
11190
11191         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11192                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11193                 return -ENOTSUP;
11194         }
11195
11196         if (pf->vf_num != 0)
11197                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
11198
11199         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11200         if (ret) {
11201                 PMD_INIT_LOG(ERR, "invalid dcb config");
11202                 return -EINVAL;
11203         }
11204         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11205         if (ret) {
11206                 PMD_INIT_LOG(ERR, "dcb sw configuration failed");
11207                 return -ENOSYS;
11208         }
11209
11210         return 0;
11211 }
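/*
 * Usage sketch (illustrative only): DCB is requested through the generic
 * device configuration before the port is started, for example:
 *
 *     struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_DCB } };
 *     conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *     (fill dcb_rx_conf.dcb_tc[] to map user priorities to TCs)
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */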
11212
11213 static int
11214 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11215                       struct rte_eth_dcb_info *dcb_info)
11216 {
11217         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11218         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11219         struct i40e_vsi *vsi = pf->main_vsi;
11220         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11221         uint16_t bsf, tc_mapping;
11222         int i, j = 0;
11223
11224         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11225                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11226         else
11227                 dcb_info->nb_tcs = 1;
11228         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11229                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11230         for (i = 0; i < dcb_info->nb_tcs; i++)
11231                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11232
11233         /* get queue mapping if vmdq is disabled */
11234         if (!pf->nb_cfg_vmdq_vsi) {
11235                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11236                         if (!(vsi->enabled_tc & (1 << i)))
11237                                 continue;
11238                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11239                         dcb_info->tc_queue.tc_rxq[j][i].base =
11240                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11241                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11242                         dcb_info->tc_queue.tc_txq[j][i].base =
11243                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11244                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11245                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11246                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11247                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11248                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11249                 }
11250                 return 0;
11251         }
11252
11253         /* get queue mapping if vmdq is enabled */
11254         do {
11255                 vsi = pf->vmdq[j].vsi;
11256                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11257                         if (!(vsi->enabled_tc & (1 << i)))
11258                                 continue;
11259                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11260                         dcb_info->tc_queue.tc_rxq[j][i].base =
11261                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11262                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11263                         dcb_info->tc_queue.tc_txq[j][i].base =
11264                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11265                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11266                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11267                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11268                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11269                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11270                 }
11271                 j++;
11272         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11273         return 0;
11274 }
11275
11276 static int
11277 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11278 {
11279         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11280         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11281         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11282         uint16_t msix_intr;
11283
11284         msix_intr = intr_handle->intr_vec[queue_id];
11285         if (msix_intr == I40E_MISC_VEC_ID)
11286                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11287                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11288                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11289                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11290         else
11291                 I40E_WRITE_REG(hw,
11292                                I40E_PFINT_DYN_CTLN(msix_intr -
11293                                                    I40E_RX_VEC_START),
11294                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11295                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11296                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11297
11298         I40E_WRITE_FLUSH(hw);
11299         rte_intr_enable(&pci_dev->intr_handle);
11300
11301         return 0;
11302 }
11303
11304 static int
11305 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11306 {
11307         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11308         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11309         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11310         uint16_t msix_intr;
11311
11312         msix_intr = intr_handle->intr_vec[queue_id];
11313         if (msix_intr == I40E_MISC_VEC_ID)
11314                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11315                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11316         else
11317                 I40E_WRITE_REG(hw,
11318                                I40E_PFINT_DYN_CTLN(msix_intr -
11319                                                    I40E_RX_VEC_START),
11320                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11321         I40E_WRITE_FLUSH(hw);
11322
11323         return 0;
11324 }
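/*
 * Usage sketch (illustrative only): applications toggle the per-queue Rx
 * interrupts above through the ethdev layer, typically around an event
 * wait loop:
 *
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms);
 *     rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */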
11325
11326 static int i40e_get_regs(struct rte_eth_dev *dev,
11327                          struct rte_dev_reg_info *regs)
11328 {
11329         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11330         uint32_t *ptr_data = regs->data;
11331         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11332         const struct i40e_reg_info *reg_info;
11333
11334         if (ptr_data == NULL) {
11335                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11336                 regs->width = sizeof(uint32_t);
11337                 return 0;
11338         }
11339
11340         /* The first few registers have to be read using AQ operations */
11341         reg_idx = 0;
11342         while (i40e_regs_adminq[reg_idx].name) {
11343                 reg_info = &i40e_regs_adminq[reg_idx++];
11344                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11345                         for (arr_idx2 = 0;
11346                                         arr_idx2 <= reg_info->count2;
11347                                         arr_idx2++) {
11348                                 reg_offset = arr_idx * reg_info->stride1 +
11349                                         arr_idx2 * reg_info->stride2;
11350                                 reg_offset += reg_info->base_addr;
11351                                 ptr_data[reg_offset >> 2] =
11352                                         i40e_read_rx_ctl(hw, reg_offset);
11353                         }
11354         }
11355
11356         /* The remaining registers can be read using primitives */
11357         reg_idx = 0;
11358         while (i40e_regs_others[reg_idx].name) {
11359                 reg_info = &i40e_regs_others[reg_idx++];
11360                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11361                         for (arr_idx2 = 0;
11362                                         arr_idx2 <= reg_info->count2;
11363                                         arr_idx2++) {
11364                                 reg_offset = arr_idx * reg_info->stride1 +
11365                                         arr_idx2 * reg_info->stride2;
11366                                 reg_offset += reg_info->base_addr;
11367                                 ptr_data[reg_offset >> 2] =
11368                                         I40E_READ_REG(hw, reg_offset);
11369                         }
11370         }
11371
11372         return 0;
11373 }
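/*
 * Usage sketch (illustrative only): the NULL-data branch above lets
 * callers size the buffer first, following the rte_eth_dev_get_reg_info()
 * convention of a length/width probe followed by the real dump:
 *
 *     struct rte_dev_reg_info regs = { 0 };
 *     rte_eth_dev_get_reg_info(port_id, &regs);      fills length/width
 *     regs.data = rte_zmalloc(NULL, regs.length * regs.width, 0);
 *     rte_eth_dev_get_reg_info(port_id, &regs);      fills data
 */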
11374
11375 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11376 {
11377         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11378
11379         /* Convert word count to byte count */
11380         return hw->nvm.sr_size << 1;
11381 }
11382
11383 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11384                            struct rte_dev_eeprom_info *eeprom)
11385 {
11386         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11387         uint16_t *data = eeprom->data;
11388         uint16_t offset, length, cnt_words;
11389         int ret_code;
11390
11391         offset = eeprom->offset >> 1;
11392         length = eeprom->length >> 1;
11393         cnt_words = length;
11394
11395         if (offset > hw->nvm.sr_size ||
11396                 offset + length > hw->nvm.sr_size) {
11397                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11398                 return -EINVAL;
11399         }
11400
11401         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11402
11403         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11404         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11405                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11406                 return -EIO;
11407         }
11408
11409         return 0;
11410 }
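/*
 * Usage sketch (illustrative only): eeprom->offset and eeprom->length
 * arrive in bytes and are converted to 16-bit NVM words above, e.g.:
 *
 *     uint16_t buf[64];
 *     struct rte_dev_eeprom_info info = { 0 };
 *     info.data = buf;
 *     info.length = sizeof(buf);
 *     rte_eth_dev_get_eeprom(port_id, &info);
 */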
11411
11412 static int i40e_get_module_info(struct rte_eth_dev *dev,
11413                                 struct rte_eth_dev_module_info *modinfo)
11414 {
11415         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11416         uint32_t sff8472_comp = 0;
11417         uint32_t sff8472_swap = 0;
11418         uint32_t sff8636_rev = 0;
11419         i40e_status status;
11420         uint32_t type = 0;
11421
11422         /* Check if firmware supports reading module EEPROM. */
11423         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11424                 PMD_DRV_LOG(ERR,
11425                             "Module EEPROM memory read not supported. "
11426                             "Please update the NVM image.");
11427                 return -EINVAL;
11428         }
11429
11430         status = i40e_update_link_info(hw);
11431         if (status)
11432                 return -EIO;
11433
11434         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11435                 PMD_DRV_LOG(ERR,
11436                             "Cannot read module EEPROM memory. "
11437                             "No module connected.");
11438                 return -EINVAL;
11439         }
11440
11441         type = hw->phy.link_info.module_type[0];
11442
11443         switch (type) {
11444         case I40E_MODULE_TYPE_SFP:
11445                 status = i40e_aq_get_phy_register(hw,
11446                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11447                                 I40E_I2C_EEPROM_DEV_ADDR,
11448                                 I40E_MODULE_SFF_8472_COMP,
11449                                 &sff8472_comp, NULL);
11450                 if (status)
11451                         return -EIO;
11452
11453                 status = i40e_aq_get_phy_register(hw,
11454                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11455                                 I40E_I2C_EEPROM_DEV_ADDR,
11456                                 I40E_MODULE_SFF_8472_SWAP,
11457                                 &sff8472_swap, NULL);
11458                 if (status)
11459                         return -EIO;
11460
11461                 /* Check if the module requires address swap to access
11462                  * the other EEPROM memory page.
11463                  */
11464                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11465                         PMD_DRV_LOG(WARNING,
11466                                     "Module address swap to access "
11467                                     "page 0xA2 is not supported.");
11468                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11469                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11470                 } else if (sff8472_comp == 0x00) {
11471                         /* Module is not SFF-8472 compliant */
11472                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11473                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11474                 } else {
11475                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
11476                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11477                 }
11478                 break;
11479         case I40E_MODULE_TYPE_QSFP_PLUS:
11480                 /* Read from memory page 0. */
11481                 status = i40e_aq_get_phy_register(hw,
11482                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11483                                 0,
11484                                 I40E_MODULE_REVISION_ADDR,
11485                                 &sff8636_rev, NULL);
11486                 if (status)
11487                         return -EIO;
11488                 /* Determine revision compliance byte */
11489                 if (sff8636_rev > 0x02) {
11490                         /* Module is SFF-8636 compliant */
11491                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
11492                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11493                 } else {
11494                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
11495                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11496                 }
11497                 break;
11498         case I40E_MODULE_TYPE_QSFP28:
11499                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11500                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11501                 break;
11502         default:
11503                 PMD_DRV_LOG(ERR, "Module type unrecognized");
11504                 return -EINVAL;
11505         }
11506         return 0;
11507 }
11508
11509 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11510                                   struct rte_dev_eeprom_info *info)
11511 {
11512         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11513         bool is_sfp = false;
11514         i40e_status status;
11515         uint8_t *data;
11516         uint32_t value = 0;
11517         uint32_t i;
11518
11519         if (!info || !info->length || !info->data)
11520                 return -EINVAL;
11521         data = info->data;
11522         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11523                 is_sfp = true;
11524
11525         for (i = 0; i < info->length; i++) {
11526                 u32 offset = i + info->offset;
11527                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11528
11529                 /* Check if we need to access the other memory page */
11530                 if (is_sfp) {
11531                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11532                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11533                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11534                         }
11535                 } else {
11536                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11537                                 /* Compute memory page number and offset. */
11538                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11539                                 addr++;
11540                         }
11541                 }
11542                 status = i40e_aq_get_phy_register(hw,
11543                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11544                                 addr, offset, &value, NULL);
11545                 if (status)
11546                         return -EIO;
11547                 data[i] = (uint8_t)value;
11548         }
11549         return 0;
11550 }
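/*
 * Usage sketch (illustrative only): the two module callbacks pair up
 * through the ethdev API; the info call sizes the buffer for the dump:
 *
 *     struct rte_eth_dev_module_info mi;
 *     struct rte_dev_eeprom_info ei = { 0 };
 *     rte_eth_dev_get_module_info(port_id, &mi);
 *     ei.length = mi.eeprom_len;
 *     ei.data = rte_zmalloc(NULL, mi.eeprom_len, 0);
 *     rte_eth_dev_get_module_eeprom(port_id, &ei);
 */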
11551
11552 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11553                                      struct ether_addr *mac_addr)
11554 {
11555         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11556         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11557         struct i40e_vsi *vsi = pf->main_vsi;
11558         struct i40e_mac_filter_info mac_filter;
11559         struct i40e_mac_filter *f;
11560         int ret;
11561
11562         if (!is_valid_assigned_ether_addr(mac_addr)) {
11563                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11564                 return -EINVAL;
11565         }
11566
11567         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11568                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11569                         break;
11570         }
11571
11572         if (f == NULL) {
11573                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11574                 return -EIO;
11575         }
11576
11577         mac_filter = f->mac_info;
11578         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11579         if (ret != I40E_SUCCESS) {
11580                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11581                 return -EIO;
11582         }
11583         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11584         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11585         if (ret != I40E_SUCCESS) {
11586                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11587                 return -EIO;
11588         }
11589         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11590
11591         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11592                                         mac_addr->addr_bytes, NULL);
11593         if (ret != I40E_SUCCESS) {
11594                 PMD_DRV_LOG(ERR, "Failed to change mac");
11595                 return -EIO;
11596         }
11597
11598         return 0;
11599 }
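/*
 * Usage sketch (illustrative only): the callback above backs the generic
 * rte_eth_dev_default_mac_addr_set() API; the address is an example:
 *
 *     struct ether_addr addr = { .addr_bytes =
 *             { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *     rte_eth_dev_default_mac_addr_set(port_id, &addr);
 */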
11600
11601 static int
11602 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11603 {
11604         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11605         struct rte_eth_dev_data *dev_data = pf->dev_data;
11606         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11607         int ret = 0;
11608
11609         /* check if mtu is within the allowed range */
11610         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11611                 return -EINVAL;
11612
11613         /* MTU cannot be changed while the port is started */
11614         if (dev_data->dev_started) {
11615                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11616                             dev_data->port_id);
11617                 return -EBUSY;
11618         }
11619
11620         if (frame_size > ETHER_MAX_LEN)
11621                 dev_data->dev_conf.rxmode.offloads |=
11622                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11623         else
11624                 dev_data->dev_conf.rxmode.offloads &=
11625                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11626
11627         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11628
11629         return ret;
11630 }
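
/*
 * Illustrative usage sketch (assumption: port 0 is an i40e port).
 * Because the callback above rejects a started port, the device is
 * stopped first; the jumbo offload flag then follows from the frame
 * size mtu + I40E_ETH_OVERHEAD computed above.
 *
 *      rte_eth_dev_stop(0);
 *      if (rte_eth_dev_set_mtu(0, 9000) != 0)
 *              printf("failed to set MTU\n");
 */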

/* Restore ethertype filter */
static void
i40e_ethertype_filter_restore(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_ethertype_filter_list
                *ethertype_list = &pf->ethertype.ethertype_list;
        struct i40e_ethertype_filter *f;
        /* zero-initialized so the log below is defined for an empty list */
        struct i40e_control_filter_stats stats = {0};
        uint16_t flags;

        TAILQ_FOREACH(f, ethertype_list, rules) {
                flags = 0;
                if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
                        flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
                if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
                        flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
                flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

                memset(&stats, 0, sizeof(stats));
                i40e_aq_add_rem_control_packet_filter(hw,
                                            f->input.mac_addr.addr_bytes,
                                            f->input.ether_type,
                                            flags, pf->main_vsi->seid,
                                            f->queue, 1, &stats, NULL);
        }
        PMD_DRV_LOG(INFO, "Ethertype filter:"
                    " mac_etype_used = %u, etype_used = %u,"
                    " mac_etype_free = %u, etype_free = %u",
                    stats.mac_etype_used, stats.etype_used,
                    stats.mac_etype_free, stats.etype_free);
}

/* Restore tunnel filter */
static void
i40e_tunnel_filter_restore(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_vsi *vsi;
        struct i40e_pf_vf *vf;
        struct i40e_tunnel_filter_list
                *tunnel_list = &pf->tunnel.tunnel_list;
        struct i40e_tunnel_filter *f;
        struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
        bool big_buffer;

        TAILQ_FOREACH(f, tunnel_list, rules) {
                /* Decide per filter whether the big-buffer variant is
                 * needed; do not let a previous filter's choice leak
                 * into this one.
                 */
                big_buffer = false;
                if (!f->is_to_vf)
                        vsi = pf->main_vsi;
                else {
                        vf = &pf->vfs[f->vf_id];
                        vsi = vf->vsi;
                }
                memset(&cld_filter, 0, sizeof(cld_filter));
                ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
                        (struct ether_addr *)&cld_filter.element.outer_mac);
                ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
                        (struct ether_addr *)&cld_filter.element.inner_mac);
                cld_filter.element.inner_vlan = f->input.inner_vlan;
                cld_filter.element.flags = f->input.flags;
                cld_filter.element.tenant_id = f->input.tenant_id;
                cld_filter.element.queue_number = f->queue;
                rte_memcpy(cld_filter.general_fields,
                           f->input.general_fields,
                           sizeof(f->input.general_fields));

                if (((f->input.flags &
                     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
                     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
                    ((f->input.flags &
                     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
                     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
                    ((f->input.flags &
                     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
                     I40E_AQC_ADD_CLOUD_FILTER_0X10))
                        big_buffer = true;

                if (big_buffer)
                        i40e_aq_add_cloud_filters_big_buffer(hw,
                                             vsi->seid, &cld_filter, 1);
                else
                        i40e_aq_add_cloud_filters(hw, vsi->seid,
                                                  &cld_filter.element, 1);
        }
}

/* Restore rss filter */
static inline void
i40e_rss_filter_restore(struct i40e_pf *pf)
{
        struct i40e_rte_flow_rss_conf *conf = &pf->rss_info;

        if (conf->conf.queue_num)
                i40e_config_rss_filter(pf, conf, TRUE);
}

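/* Replay every software-tracked filter (ethertype, tunnel, flow
 * director, RSS) into hardware; called when the device is started so
 * that rules added before a stop or reset survive the restart.
 */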
static void
i40e_filter_restore(struct i40e_pf *pf)
{
        i40e_ethertype_filter_restore(pf);
        i40e_tunnel_filter_restore(pf);
        i40e_fdir_filter_restore(pf);
        i40e_rss_filter_restore(pf);
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
        if (strcmp(dev->device->driver->name, drv->driver.name))
                return false;

        return true;
}

bool
is_i40e_supported(struct rte_eth_dev *dev)
{
        return is_device_supported(dev, &rte_i40e_pmd);
}

struct i40e_customized_pctype*
i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
{
        int i;

        for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
                if (pf->customized_pctype[i].index == index)
                        return &pf->customized_pctype[i];
        }
        return NULL;
}

static int
i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
                              uint32_t pkg_size, uint32_t proto_num,
                              struct rte_pmd_i40e_proto_info *proto,
                              enum rte_pmd_i40e_package_op op)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint32_t pctype_num;
        struct rte_pmd_i40e_ptype_info *pctype;
        uint32_t buff_size;
        struct i40e_customized_pctype *new_pctype = NULL;
        uint8_t proto_id;
        uint8_t pctype_value;
        char name[64];
        uint32_t i, j, n;
        int ret;

        if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
            op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
                PMD_DRV_LOG(ERR, "Unsupported operation.");
                return -1;
        }

        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                (uint8_t *)&pctype_num, sizeof(pctype_num),
                                RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get pctype number");
                return -1;
        }
        if (!pctype_num) {
                PMD_DRV_LOG(INFO, "No new pctype added");
                return -1;
        }

        buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
        pctype = rte_zmalloc("new_pctype", buff_size, 0);
        if (!pctype) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -1;
        }
        /* get information about new pctype list */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                        (uint8_t *)pctype, buff_size,
                                        RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get pctype list");
                rte_free(pctype);
                return -1;
        }

        /* Update customized pctype. */
        for (i = 0; i < pctype_num; i++) {
                pctype_value = pctype[i].ptype_id;
                memset(name, 0, sizeof(name));
                for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
                        proto_id = pctype[i].protocols[j];
                        if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
                                continue;
                        for (n = 0; n < proto_num; n++) {
                                if (proto[n].proto_id != proto_id)
                                        continue;
                                strcat(name, proto[n].name);
                                strcat(name, "_");
                                break;
                        }
                }
                /* Skip pctypes with no recognized protocol; this also
                 * avoids indexing name[-1] below when name stays empty.
                 */
                if (name[0] == '\0')
                        continue;
                name[strlen(name) - 1] = '\0';
                if (!strcmp(name, "GTPC"))
                        new_pctype =
                                i40e_find_customized_pctype(pf,
                                                      I40E_CUSTOMIZED_GTPC);
                else if (!strcmp(name, "GTPU_IPV4"))
                        new_pctype =
                                i40e_find_customized_pctype(pf,
                                                   I40E_CUSTOMIZED_GTPU_IPV4);
                else if (!strcmp(name, "GTPU_IPV6"))
                        new_pctype =
                                i40e_find_customized_pctype(pf,
                                                   I40E_CUSTOMIZED_GTPU_IPV6);
                else if (!strcmp(name, "GTPU"))
                        new_pctype =
                                i40e_find_customized_pctype(pf,
                                                      I40E_CUSTOMIZED_GTPU);
                if (new_pctype) {
                        if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
                                new_pctype->pctype = pctype_value;
                                new_pctype->valid = true;
                        } else {
                                new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
                                new_pctype->valid = false;
                        }
                }
        }

        rte_free(pctype);
        return 0;
}
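
/*
 * Example of the name assembly above (hypothetical protocol list): a
 * pctype whose protocol entries resolve to "GTPU" and "IPV4" is
 * concatenated to "GTPU_IPV4_", trimmed to "GTPU_IPV4", and therefore
 * bound to the I40E_CUSTOMIZED_GTPU_IPV4 slot.
 */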

static int
i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
                             uint32_t pkg_size, uint32_t proto_num,
                             struct rte_pmd_i40e_proto_info *proto,
                             enum rte_pmd_i40e_package_op op)
{
        struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
        uint16_t port_id = dev->data->port_id;
        uint32_t ptype_num;
        struct rte_pmd_i40e_ptype_info *ptype;
        uint32_t buff_size;
        uint8_t proto_id;
        char name[RTE_PMD_I40E_DDP_NAME_SIZE];
        uint32_t i, j, n;
        bool in_tunnel;
        int ret;

        if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
            op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
                PMD_DRV_LOG(ERR, "Unsupported operation.");
                return -1;
        }

        if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
                rte_pmd_i40e_ptype_mapping_reset(port_id);
                return 0;
        }

        /* get information about new ptype num */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                (uint8_t *)&ptype_num, sizeof(ptype_num),
                                RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get ptype number");
                return ret;
        }
        if (!ptype_num) {
                PMD_DRV_LOG(INFO, "No new ptype added");
                return -1;
        }

        buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
        ptype = rte_zmalloc("new_ptype", buff_size, 0);
        if (!ptype) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return -1;
        }

        /* get information about new ptype list */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                        (uint8_t *)ptype, buff_size,
                                        RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get ptype list");
                rte_free(ptype);
                return ret;
        }

        buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
        ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
        if (!ptype_mapping) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                rte_free(ptype);
                return -1;
        }

        /* Update ptype mapping table. */
        for (i = 0; i < ptype_num; i++) {
                ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
                ptype_mapping[i].sw_ptype = 0;
                in_tunnel = false;
                for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
                        proto_id = ptype[i].protocols[j];
                        if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
                                continue;
                        for (n = 0; n < proto_num; n++) {
                                if (proto[n].proto_id != proto_id)
                                        continue;
                                memset(name, 0, sizeof(name));
                                strcpy(name, proto[n].name);
                                if (!strncasecmp(name, "PPPOE", 5))
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L2_ETHER_PPPOE;
                                else if (!strncasecmp(name, "IPV4FRAG", 8) &&
                                         !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_FRAG;
                                } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
                                           in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
                                } else if (!strncasecmp(name, "OIPV4", 5)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "IPV4", 4) &&
                                           !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV4", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV6FRAG", 8) &&
                                         !in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_FRAG;
                                } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
                                           in_tunnel) {
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_FRAG;
                                } else if (!strncasecmp(name, "OIPV6", 5)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "IPV6", 4) &&
                                           !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "IPV6", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                            RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
                                else if (!strncasecmp(name, "UDP", 3) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_UDP;
                                else if (!strncasecmp(name, "UDP", 3) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_UDP;
                                else if (!strncasecmp(name, "TCP", 3) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_TCP;
                                else if (!strncasecmp(name, "TCP", 3) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_TCP;
                                else if (!strncasecmp(name, "SCTP", 4) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_SCTP;
                                else if (!strncasecmp(name, "SCTP", 4) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_SCTP;
                                else if ((!strncasecmp(name, "ICMP", 4) ||
                                          !strncasecmp(name, "ICMPV6", 6)) &&
                                         !in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_L4_ICMP;
                                else if ((!strncasecmp(name, "ICMP", 4) ||
                                          !strncasecmp(name, "ICMPV6", 6)) &&
                                         in_tunnel)
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_INNER_L4_ICMP;
                                else if (!strncasecmp(name, "GTPC", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPC;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "GTPU", 4)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GTPU;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "GRENAT", 6)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_GRENAT;
                                        in_tunnel = true;
                                } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
                                        ptype_mapping[i].sw_ptype |=
                                                RTE_PTYPE_TUNNEL_L2TP;
                                        in_tunnel = true;
                                }

                                break;
                        }
                }
        }

        ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
                                                ptype_num, 0);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to update mapping table.");

        rte_free(ptype_mapping);
        rte_free(ptype);
        return ret;
}
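
/*
 * Example of the mapping above (hypothetical DDP protocol list): a
 * hardware ptype whose protocols resolve to "OIPV4", "GTPU", "IPV4",
 * "UDP" accumulates
 *
 *      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GTPU |
 *      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP
 *
 * because "OIPV4" marks the start of a tunnel and every protocol seen
 * afterwards is classified with the INNER_* variants.
 */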

void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
                            uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint32_t proto_num;
        struct rte_pmd_i40e_proto_info *proto;
        uint32_t buff_size;
        uint32_t i;
        int ret;

        if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
            op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
                PMD_DRV_LOG(ERR, "Unsupported operation.");
                return;
        }

        /* get information about protocol number */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                       (uint8_t *)&proto_num, sizeof(proto_num),
                                       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get protocol number");
                return;
        }
        if (!proto_num) {
                PMD_DRV_LOG(INFO, "No new protocol added");
                return;
        }

        buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
        proto = rte_zmalloc("new_proto", buff_size, 0);
        if (!proto) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
                return;
        }

        /* get information about protocol list */
        ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
                                        (uint8_t *)proto, buff_size,
                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get protocol list");
                rte_free(proto);
                return;
        }

        /* Check if GTP is supported. */
        for (i = 0; i < proto_num; i++) {
                if (!strncmp(proto[i].name, "GTP", 3)) {
                        if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
                                pf->gtp_support = true;
                        else
                                pf->gtp_support = false;
                        break;
                }
        }

        /* Update customized pctype info */
        ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
                                            proto_num, proto, op);
        if (ret)
                PMD_DRV_LOG(INFO, "No pctype is updated.");

        /* Update customized ptype info */
        ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
                                           proto_num, proto, op);
        if (ret)
                PMD_DRV_LOG(INFO, "No ptype is updated.");

        rte_free(proto);
}
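
/*
 * Illustrative usage sketch (assumptions: port 0, and a DDP profile
 * already read into pkg_buf/pkg_size): applications load a profile
 * through the PMD-specific API, which is expected to reach the update
 * helpers above.
 *
 *      if (rte_pmd_i40e_process_ddp_package(0, pkg_buf, pkg_size,
 *                                      RTE_PMD_I40E_PKG_OP_WR_ADD) < 0)
 *              printf("failed to load DDP profile\n");
 */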

/* Create a QinQ cloud filter
 *
 * The Fortville NIC has limited resources for tunnel filters,
 * so we can only reuse existing filters.
 *
 * In step 1 we define which Field Vector fields can be used for
 * filter types.
 * As we do not have the inner tag defined as a field,
 * we have to define it first, by reusing one of the L1 entries.
 *
 * In step 2 we are replacing one of the existing filter types with
 * a new one for QinQ.
 * As we are reusing an L1 entry and replacing an L2 entry, some of
 * the default filter types will disappear, depending on which L1
 * and L2 entries we reuse.
 *
 * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
 *
 * 1.   Create L1 filter of outer vlan (12b) which will be in use
 *              later when we define the cloud filter.
 *      a.      Valid_flags.replace_cloud = 0
 *      b.      Old_filter = 10 (Stag_Inner_Vlan)
 *      c.      New_filter = 0x10
 *      d.      TR bit = 0xff (optional, not used here)
 *      e.      Buffer – 2 entries:
 *              i.      Byte 0 = 8 (outer vlan FV index).
 *                      Byte 1 = 0 (rsv)
 *                      Byte 2-3 = 0x0fff
 *              ii.     Byte 0 = 37 (inner vlan FV index).
 *                      Byte 1 = 0 (rsv)
 *                      Byte 2-3 = 0x0fff
 *
 * Step 2:
 * 2.   Create cloud filter using two L1 filter entries: stag and
 *              new filter (outer vlan + inner vlan)
 *      a.      Valid_flags.replace_cloud = 1
 *      b.      Old_filter = 1 (instead of outer IP)
 *      c.      New_filter = 0x10
 *      d.      Buffer – 2 entries:
 *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
 *                      Byte 1-3 = 0 (rsv)
 *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
 *                      Byte 9-11 = 0 (rsv)
 */
static int
i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
{
        int ret = -ENOTSUP;
        struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
        struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);

        if (pf->support_multi_driver) {
                PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
                return ret;
        }

        /* Init */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L1 filter */
        filter_replace.old_filter_type =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
        filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
        filter_replace.tr_bit = 0;

        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Field Vector 12b mask */
        filter_replace_buf.data[2] = 0xff;
        filter_replace_buf.data[3] = 0x0f;
        filter_replace_buf.data[4] =
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        /* Field Vector 12b mask */
        filter_replace_buf.data[6] = 0xff;
        filter_replace_buf.data[7] = 0x0f;
        ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                        &filter_replace_buf);
        if (ret != I40E_SUCCESS)
                return ret;
        PMD_DRV_LOG(DEBUG, "Global configuration modification: "
                    "cloud l1 type is changed from 0x%x to 0x%x",
                    filter_replace.old_filter_type,
                    filter_replace.new_filter_type);

        /* Apply the second L2 cloud filter */
        memset(&filter_replace, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
        memset(&filter_replace_buf, 0,
               sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));

        /* create L2 filter, input for L2 filter will be L1 filter */
        filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
        filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
        filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;

        /* Prepare the buffer, 2 entries */
        filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
        filter_replace_buf.data[0] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
        filter_replace_buf.data[4] |=
                I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
        ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
                        &filter_replace_buf);
        if (!ret) {
                i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
                PMD_DRV_LOG(DEBUG, "Global configuration modification: "
                            "cloud filter type is changed from 0x%x to 0x%x",
                            filter_replace.old_filter_type,
                            filter_replace.new_filter_type);
        }
        return ret;
}

int
i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
                   const struct rte_flow_action_rss *in)
{
        if (in->key_len > RTE_DIM(out->key) ||
            in->queue_num > RTE_DIM(out->queue))
                return -EINVAL;
        out->conf = (struct rte_flow_action_rss){
                .func = in->func,
                .level = in->level,
                .types = in->types,
                .key_len = in->key_len,
                .queue_num = in->queue_num,
                /* in->key may be NULL when key_len is 0 */
                .key = in->key_len ?
                        memcpy(out->key, in->key, in->key_len) : NULL,
                .queue = memcpy(out->queue, in->queue,
                                sizeof(*in->queue) * in->queue_num),
        };
        return 0;
}
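
/*
 * Illustrative round trip (hypothetical rss_action variable of type
 * struct rte_flow_action_rss): i40e_rss_conf_init() deep-copies the
 * key and queue arrays into PF-owned storage, so the copy still
 * compares equal via i40e_action_rss_same() after the caller's
 * buffers are gone.
 *
 *      struct i40e_rte_flow_rss_conf copy;
 *
 *      if (i40e_rss_conf_init(&copy, &rss_action) == 0 &&
 *          i40e_action_rss_same(&copy.conf, &rss_action))
 *              printf("copy holds private duplicates of key/queue\n");
 */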

int
i40e_action_rss_same(const struct rte_flow_action_rss *comp,
                     const struct rte_flow_action_rss *with)
{
        return (comp->func == with->func &&
                comp->level == with->level &&
                comp->types == with->types &&
                comp->key_len == with->key_len &&
                comp->queue_num == with->queue_num &&
                !memcmp(comp->key, with->key, with->key_len) &&
                !memcmp(comp->queue, with->queue,
                        sizeof(*with->queue) * with->queue_num));
}

int
i40e_config_rss_filter(struct i40e_pf *pf,
                struct i40e_rte_flow_rss_conf *conf, bool add)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t i, lut = 0;
        uint16_t j, num;
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = conf->conf.key_len ?
                        (void *)(uintptr_t)conf->conf.key : NULL,
                .rss_key_len = conf->conf.key_len,
                .rss_hf = conf->conf.types,
        };
        struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;

        if (!add) {
                if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
                        i40e_pf_disable_rss(pf);
                        memset(rss_info, 0,
                                sizeof(struct i40e_rte_flow_rss_conf));
                        return 0;
                }
                return -EINVAL;
        }

        if (rss_info->conf.queue_num)
                return -EINVAL;

        /* If both VMDQ and RSS enabled, not all of PF queues are configured.
         * It's necessary to calculate the actual PF queues that are configured.
         */
        if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
                num = i40e_pf_calc_configured_queues_num(pf);
        else
                num = pf->dev_data->nb_rx_queues;

        num = RTE_MIN(num, conf->conf.queue_num);
        PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
                        num);

        if (num == 0) {
                PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
                return -ENOTSUP;
        }

        /* Fill in redirection table. Four 8-bit entries are packed into
         * each 32-bit I40E_PFQF_HLUT register, written on every fourth
         * iteration.
         */
        for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
                if (j == num)
                        j = 0;
                lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
                        hw->func_caps.rss_table_entry_width) - 1));
                if ((i & 3) == 3)
                        I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }

        if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
                i40e_pf_disable_rss(pf);
                return 0;
        }
        if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
                (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
                /* Random default keys */
                static uint32_t rss_key_default[] = {0x6b793944,
                        0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
                        0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
                        0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

                rss_conf.rss_key = (uint8_t *)rss_key_default;
                rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
                                                        sizeof(uint32_t);
        }

        i40e_hw_rss_hash_set(pf, &rss_conf);

        if (i40e_rss_conf_init(rss_info, &conf->conf))
                return -EINVAL;

        return 0;
}
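
/*
 * Worked example of the redirection-table fill above (hypothetical
 * values): with conf->conf.queue = {4, 5} and num = 2, the LUT entries
 * alternate 4, 5, 4, 5, ...; after four iterations lut == 0x04050405
 * and the (i & 3) == 3 test stores it into I40E_PFQF_HLUT(0), i.e.
 * four 8-bit entries per 32-bit register.
 */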

RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
{
        i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
        if (i40e_logtype_init >= 0)
                rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
        i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
        if (i40e_logtype_driver >= 0)
                rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
}

RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
                              QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
                              ETH_I40E_SUPPORT_MULTI_DRIVER "=1");