net/i40e: remove dependence on Tx queue flags
drivers/net/i40e/i40e_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
                                     struct rte_eth_xstat_name *xstats_names,
                                     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
                                char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                              enum rte_vlan_type vlan_type,
                              uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
                            struct ether_addr *mac_addr,
                            uint32_t index,
                            uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                                        struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
                                                     uint16_t seid,
                                                     uint16_t rule_type,
                                                     uint16_t *entries,
                                                     uint16_t count,
                                                     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
                           struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
                                struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
                                  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
        const struct rte_eth_ethertype_filter *input,
        struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
        struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
        struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
        { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .dev_reset                    = i40e_dev_reset,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .xstats_get_names             = i40e_dev_xstats_get_names,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .fw_version_get               = i40e_fw_version_get,
        .dev_infos_get                = i40e_dev_info_get,
        .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .rx_descriptor_status         = i40e_dev_rx_descriptor_status,
        .tx_descriptor_status         = i40e_dev_tx_descriptor_status,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
        .get_reg                      = i40e_get_regs,
        .get_eeprom_length            = i40e_get_eeprom_length,
        .get_eeprom                   = i40e_get_eeprom,
        .get_module_info              = i40e_get_module_info,
        .get_module_eeprom            = i40e_get_module_eeprom,
        .mac_addr_set                 = i40e_set_default_mac_addr,
        .mtu_set                      = i40e_dev_mtu_set,
        .tm_ops_get                   = i40e_tm_ops_get,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

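/*
 * PCI probe callback: create the PF ethdev first and then, if the devargs
 * request VF representors (e.g. a devargs string such as
 * "representor=[0-2]"; example shown for illustration), create one extra
 * ethdev per listed VF on top of the PF port.
 */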
static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        char name[RTE_ETH_NAME_MAX_LEN];
        struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
        int i, retval;

        if (pci_dev->device.devargs) {
                retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
                                &eth_da);
                if (retval)
                        return retval;
        }

        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                sizeof(struct i40e_adapter),
                eth_dev_pci_specific_init, pci_dev,
                eth_i40e_dev_init, NULL);

        if (retval || eth_da.nb_representor_ports < 1)
                return retval;

        /* probe VF representor ports */
        struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
                pci_dev->device.name);

        if (pf_ethdev == NULL)
                return -ENODEV;

        for (i = 0; i < eth_da.nb_representor_ports; i++) {
                struct i40e_vf_representor representor = {
                        .vf_id = eth_da.representor_ports[i],
                        .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
                                pf_ethdev->data->dev_private)->switch_domain_id,
                        .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
                                pf_ethdev->data->dev_private)
                };

                /* representor port net_bdf_port */
                snprintf(name, sizeof(name), "net_%s_representor_%d",
                        pci_dev->device.name, eth_da.representor_ports[i]);

                retval = rte_eth_dev_create(&pci_dev->device, name,
                        sizeof(struct i40e_vf_representor), NULL, NULL,
                        i40e_vf_representor_init, &representor);

                if (retval)
                        PMD_DRV_LOG(ERR, "failed to create i40e vf "
                                "representor %s.", name);
        }

        return 0;
}

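/*
 * PCI remove callback: representor and PF ports need different uninit
 * handlers, chosen via the RTE_ETH_DEV_REPRESENTOR flag.
 */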
static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
                return -ENODEV;

        if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
                return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
        else
                return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
        .id_table = pci_id_i40e_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                     RTE_PCI_DRV_IOVA_AS_VA,
        .probe = eth_i40e_pci_probe,
        .remove = eth_i40e_pci_remove,
};

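/*
 * Thin wrapper around i40e_write_rx_ctl() that also logs the write:
 * these registers are global, i.e. shared between ports of one device.
 */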
static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
        i40e_write_rx_ctl(hw, reg_addr, reg_val);
        PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
                    "with value 0x%08x",
                    reg_addr, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
        /*
         * Initialize registers for parsing packet type of QinQ
         * This should be removed from code once proper
         * configuration API is added to avoid configuration conflicts
         * between ports of the same device.
         */
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
        i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}

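/* Configure interrupt auto-mask behaviour via I40E_GLINT_CTL. */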
static inline void i40e_config_automask(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t val;

        /* INTENA flag is not auto-cleared for interrupt */
        val = I40E_READ_REG(hw, I40E_GLINT_CTL);
        val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
                I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

        /* If multi-driver support is enabled, the PF uses INT0. */
        if (!pf->support_multi_driver)
                val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

        I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR,
                        "Failed to add filter to drop flow control frames from VSIs.");
}

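/*
 * kvargs handler for the "floating_veb_list" value: a ';'-separated list
 * of VF indices or ranges, e.g. "1;3-5" marks VFs 1 and 3..5 as attached
 * to the floating VEB.
 */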
static int
floating_veb_list_handler(__rte_unused const char *key,
                          const char *floating_veb_value,
                          void *opaque)
{
        int idx = 0;
        unsigned int count = 0;
        char *end = NULL;
        int min, max;
        bool *vf_floating_veb = opaque;

        while (isblank(*floating_veb_value))
                floating_veb_value++;

        /* Reset floating VEB configuration for VFs */
        for (idx = 0; idx < I40E_MAX_VF; idx++)
                vf_floating_veb[idx] = false;

        min = I40E_MAX_VF;
        do {
                while (isblank(*floating_veb_value))
                        floating_veb_value++;
                if (*floating_veb_value == '\0')
                        return -1;
                errno = 0;
                idx = strtoul(floating_veb_value, &end, 10);
                if (errno || end == NULL)
                        return -1;
                while (isblank(*end))
                        end++;
                if (*end == '-') {
                        min = idx;
                } else if ((*end == ';') || (*end == '\0')) {
                        max = idx;
                        if (min == I40E_MAX_VF)
                                min = idx;
                        if (max >= I40E_MAX_VF)
                                max = I40E_MAX_VF - 1;
                        for (idx = min; idx <= max; idx++) {
                                vf_floating_veb[idx] = true;
                                count++;
                        }
                        min = I40E_MAX_VF;
                } else {
                        return -1;
                }
                floating_veb_value = end + 1;
        } while (*end != '\0');

        if (count == 0)
                return -1;

        return 0;
}

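/*
 * Select the VFs that attach to the floating VEB: all of them by default
 * when the floating VEB is enabled, or only those listed in
 * "floating_veb_list" when that devarg is present.
 */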
static void
config_vf_floating_veb(struct rte_devargs *devargs,
                       uint16_t floating_veb,
                       bool *vf_floating_veb)
{
        struct rte_kvargs *kvlist;
        int i;
        const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

        if (!floating_veb)
                return;
        /* All the VFs attach to the floating VEB by default
         * when the floating VEB is enabled.
         */
        for (i = 0; i < I40E_MAX_VF; i++)
                vf_floating_veb[i] = true;

        if (devargs == NULL)
                return;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        if (!rte_kvargs_count(kvlist, floating_veb_list)) {
                rte_kvargs_free(kvlist);
                return;
        }
        /* When the floating_veb_list parameter exists, all the VFs
         * attach to the legacy VEB first, and are then moved to the
         * floating VEB according to the floating_veb_list.
         */
        if (rte_kvargs_process(kvlist, floating_veb_list,
                               floating_veb_list_handler,
                               vf_floating_veb) < 0) {
                rte_kvargs_free(kvlist);
                return;
        }
        rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
                            const char *value,
                            __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

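/* Return 1 when the devargs contain "enable_floating_veb=1", 0 otherwise. */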
static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

        if (devargs == NULL)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;

        if (!rte_kvargs_count(kvlist, floating_veb_key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        /* The floating VEB is enabled when the key-value pair
         * enable_floating_veb=1 is present.
         */
        if (rte_kvargs_process(kvlist, floating_veb_key,
                               i40e_check_floating_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}

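/* Enable the floating VEB only when both the firmware version and the
 * devargs allow it.
 */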
static void
config_floating_veb(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

        if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
                pf->floating_veb =
                        is_floating_veb_supported(pci_dev->device.devargs);
                config_vf_floating_veb(pci_dev->device.devargs,
                                       pf->floating_veb,
                                       pf->floating_veb_list);
        } else {
                pf->floating_veb = false;
        }
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

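/*
 * Set up the ethertype filter list: a TAILQ for iteration plus an rte_hash
 * keyed by struct i40e_ethertype_filter_input for O(1) rule lookup.
 */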
static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        char ethertype_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters ethertype_hash_params = {
                .name = ethertype_hash_name,
                .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize ethertype filter rule list and hash */
        TAILQ_INIT(&ethertype_rule->ethertype_list);
        snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
                 "ethertype_%s", dev->device->name);
        ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
        if (!ethertype_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
                return -EINVAL;
        }
        ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
                                       sizeof(struct i40e_ethertype_filter *) *
                                       I40E_MAX_ETHERTYPE_FILTER_NUM,
                                       0);
        if (!ethertype_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for ethertype hash map!");
                ret = -ENOMEM;
                goto err_ethertype_hash_map_alloc;
        }

        return 0;

err_ethertype_hash_map_alloc:
        rte_hash_free(ethertype_rule->hash_table);

        return ret;
}

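/* Same pattern for tunnel filters, hashed by struct i40e_tunnel_filter_input. */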
static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        char tunnel_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters tunnel_hash_params = {
                .name = tunnel_hash_name,
                .entries = I40E_MAX_TUNNEL_FILTER_NUM,
                .key_len = sizeof(struct i40e_tunnel_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize tunnel filter rule list and hash */
        TAILQ_INIT(&tunnel_rule->tunnel_list);
        snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
                 "tunnel_%s", dev->device->name);
        tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
        if (!tunnel_rule->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
                return -EINVAL;
        }
        tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
                                    sizeof(struct i40e_tunnel_filter *) *
                                    I40E_MAX_TUNNEL_FILTER_NUM,
                                    0);
        if (!tunnel_rule->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for tunnel hash map!");
                ret = -ENOMEM;
                goto err_tunnel_hash_map_alloc;
        }

        return 0;

err_tunnel_hash_map_alloc:
        rte_hash_free(tunnel_rule->hash_table);

        return ret;
}

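/* Same pattern for flow director filters, hashed by struct i40e_fdir_input. */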
static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = I40E_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct i40e_fdir_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize flow director filter rule list and hash */
        TAILQ_INIT(&fdir_info->fdir_list);
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
                                          sizeof(struct i40e_fdir_filter *) *
                                          I40E_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

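/* All customized pctypes (the GTP flow types) start out invalid; they are
 * typically marked valid later, when a DDP profile defining them is loaded.
 */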
static void
i40e_init_customized_info(struct i40e_pf *pf)
{
        int i;

        /* Initialize customized pctype */
        for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
                pf->customized_pctype[i].index = i;
                pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
                pf->customized_pctype[i].valid = false;
        }

        pf->gtp_support = false;
}

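/* Clear the queue-region hash registers and the driver's region state. */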
void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_queue_regions *info = &pf->queue_region;
        uint16_t i;

        for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
                i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

        memset(info, 0, sizeof(struct i40e_queue_regions));
}

#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"

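/* kvargs handler for "support-multi-driver": only "0" and "1" are accepted. */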
static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
                               const char *value,
                               void *opaque)
{
        struct i40e_pf *pf;
        unsigned long support_multi_driver;
        char *end;

        pf = (struct i40e_pf *)opaque;

        errno = 0;
        support_multi_driver = strtoul(value, &end, 10);
        if (errno != 0 || end == value || *end != 0) {
                PMD_DRV_LOG(WARNING, "Wrong global configuration");
                return -(EINVAL);
        }

        if (support_multi_driver == 1 || support_multi_driver == 0)
                pf->support_multi_driver = (bool)support_multi_driver;
        else
                PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
                            "enabling global configuration by default.",
                            ETH_I40E_SUPPORT_MULTI_DRIVER);
        return 0;
}

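/*
 * Check the "support-multi-driver" devarg (e.g. a whitelist entry such as
 * "-w 0000:02:00.0,support-multi-driver=1"; example shown for illustration).
 * When enabled, the PMD avoids touching global registers that another
 * driver instance may own.
 */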
static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        static const char *const valid_keys[] = {
                ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
        struct rte_kvargs *kvlist;

        /* Enable global configuration by default */
        pf->support_multi_driver = false;

        if (!dev->device->devargs)
                return 0;

        kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
        if (!kvlist)
                return -EINVAL;

        if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
                PMD_DRV_LOG(WARNING, "Argument \"%s\" was given more than "
                            "once; only the first invalid or last valid "
                            "value is used!",
                            ETH_I40E_SUPPORT_MULTI_DRIVER);

        if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
                               i40e_parse_multi_drv_handler, pf) < 0) {
                rte_kvargs_free(kvlist);
                return -EINVAL;
        }

        rte_kvargs_free(kvlist);
        return 0;
}

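/*
 * Main PF initialization path, invoked once per device from probe: set up
 * the ops and burst functions, reset and bring up the hardware (adminq,
 * capabilities, PF parameters), then continue with VSI and queue setup.
 */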
1158 static int
1159 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1160 {
1161         struct rte_pci_device *pci_dev;
1162         struct rte_intr_handle *intr_handle;
1163         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1164         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1165         struct i40e_vsi *vsi;
1166         int ret;
1167         uint32_t len;
1168         uint8_t aq_fail = 0;
1169
1170         PMD_INIT_FUNC_TRACE();
1171
1172         dev->dev_ops = &i40e_eth_dev_ops;
1173         dev->rx_pkt_burst = i40e_recv_pkts;
1174         dev->tx_pkt_burst = i40e_xmit_pkts;
1175         dev->tx_pkt_prepare = i40e_prep_pkts;
1176
1177         /* for secondary processes, we don't initialise any further as primary
1178          * has already done this work. Only check we don't need a different
1179          * RX function */
1180         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1181                 i40e_set_rx_function(dev);
1182                 i40e_set_tx_function(dev);
1183                 return 0;
1184         }
1185         i40e_set_default_ptype_table(dev);
1186         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1187         intr_handle = &pci_dev->intr_handle;
1188
1189         rte_eth_copy_pci_info(dev, pci_dev);
1190
1191         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1192         pf->adapter->eth_dev = dev;
1193         pf->dev_data = dev->data;
1194
1195         hw->back = I40E_PF_TO_ADAPTER(pf);
1196         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1197         if (!hw->hw_addr) {
1198                 PMD_INIT_LOG(ERR,
1199                         "Hardware is not available, as address is NULL");
1200                 return -ENODEV;
1201         }
1202
1203         hw->vendor_id = pci_dev->id.vendor_id;
1204         hw->device_id = pci_dev->id.device_id;
1205         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1206         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1207         hw->bus.device = pci_dev->addr.devid;
1208         hw->bus.func = pci_dev->addr.function;
1209         hw->adapter_stopped = 0;
1210
1211         /* Check if need to support multi-driver */
1212         i40e_support_multi_driver(dev);
1213
1214         /* Make sure all is clean before doing PF reset */
1215         i40e_clear_hw(hw);
1216
1217         /* Initialize the hardware */
1218         i40e_hw_init(dev);
1219
1220         /* Reset here to make sure all is clean for each PF */
1221         ret = i40e_pf_reset(hw);
1222         if (ret) {
1223                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1224                 return ret;
1225         }
1226
1227         /* Initialize the shared code (base driver) */
1228         ret = i40e_init_shared_code(hw);
1229         if (ret) {
1230                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1231                 return ret;
1232         }
1233
1234         i40e_config_automask(pf);
1235
1236         i40e_set_default_pctype_table(dev);
1237
1238         /*
1239          * To work around an NVM issue, initialize the registers
1240          * for the QinQ packet type in software.
1241          * This should be removed once the issue is fixed in the NVM.
1242          */
1243         if (!pf->support_multi_driver)
1244                 i40e_GLQF_reg_init(hw);
1245
1246         /* Initialize the input set for filters (hash and flow director) to default */
1247         i40e_filter_input_set_init(pf);
1248
1249         /* Initialize the parameters for adminq */
1250         i40e_init_adminq_parameter(hw);
1251         ret = i40e_init_adminq(hw);
1252         if (ret != I40E_SUCCESS) {
1253                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1254                 return -EIO;
1255         }
1256         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1257                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1258                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1259                      ((hw->nvm.version >> 12) & 0xf),
1260                      ((hw->nvm.version >> 4) & 0xff),
1261                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1262
1263         /* initialise the L3_MAP register */
1264         if (!pf->support_multi_driver) {
1265                 ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1266                                                    0x00000028,  NULL);
1267                 if (ret)
1268                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1269                                      ret);
1270                 PMD_INIT_LOG(DEBUG,
1271                              "Global register 0x%08x changed to value 0x28",
1272                              I40E_GLQF_L3_MAP(40));
1273                 i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
1274         }
1275
1276         /* Need the special FW version to support floating VEB */
1277         config_floating_veb(dev);
1278         /* Clear PXE mode */
1279         i40e_clear_pxe_mode(hw);
1280         i40e_dev_sync_phy_type(hw);
1281
1282         /*
1283          * On X710, performance numbers are far below expectations on recent
1284          * firmware versions, and the fix may not be integrated in the next
1285          * firmware release. A software workaround is therefore needed: it
1286          * modifies the initial values of 3 internal-only registers. Note
1287          * that the workaround can be removed once the issue is fixed in
1288          * firmware.
1289          */
1290         i40e_configure_registers(hw);
1291
1292         /* Get hw capabilities */
1293         ret = i40e_get_cap(hw);
1294         if (ret != I40E_SUCCESS) {
1295                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1296                 goto err_get_capabilities;
1297         }
1298
1299         /* Initialize parameters for PF */
1300         ret = i40e_pf_parameter_init(dev);
1301         if (ret != 0) {
1302                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1303                 goto err_parameter_init;
1304         }
1305
1306         /* Initialize the queue management */
1307         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1308         if (ret < 0) {
1309                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1310                 goto err_qp_pool_init;
1311         }
1312         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1313                                 hw->func_caps.num_msix_vectors - 1);
1314         if (ret < 0) {
1315                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1316                 goto err_msix_pool_init;
1317         }
1318
1319         /* Initialize lan hmc */
1320         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1321                                 hw->func_caps.num_rx_qp, 0, 0);
1322         if (ret != I40E_SUCCESS) {
1323                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1324                 goto err_init_lan_hmc;
1325         }
1326
1327         /* Configure lan hmc */
1328         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1329         if (ret != I40E_SUCCESS) {
1330                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1331                 goto err_configure_lan_hmc;
1332         }
1333
1334         /* Get and check the MAC address */
1335         i40e_get_mac_addr(hw, hw->mac.addr);
1336         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1337                 PMD_INIT_LOG(ERR, "MAC address is not valid");
1338                 ret = -EIO;
1339                 goto err_get_mac_addr;
1340         }
1341         /* Copy the permanent MAC address */
1342         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1343                         (struct ether_addr *) hw->mac.perm_addr);
1344
1345         /* Disable flow control */
1346         hw->fc.requested_mode = I40E_FC_NONE;
1347         i40e_set_fc(hw, &aq_fail, TRUE);
1348
1349         /* Set the global registers with default ether type value */
1350         if (!pf->support_multi_driver) {
1351                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1352                                          ETHER_TYPE_VLAN);
1353                 if (ret != I40E_SUCCESS) {
1354                         PMD_INIT_LOG(ERR,
1355                                      "Failed to set the default outer "
1356                                      "VLAN ether type");
1357                         goto err_setup_pf_switch;
1358                 }
1359         }
1360
1361         /* PF setup, which includes VSI setup */
1362         ret = i40e_pf_setup(pf);
1363         if (ret) {
1364                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1365                 goto err_setup_pf_switch;
1366         }
1367
1368         /* reset all stats of the device, including pf and main vsi */
1369         i40e_dev_stats_reset(dev);
1370
1371         vsi = pf->main_vsi;
1372
1373         /* Disable double vlan by default */
1374         i40e_vsi_config_double_vlan(vsi, FALSE);
1375
1376         /* Disable S-TAG identification when floating_veb is disabled */
1377         if (!pf->floating_veb) {
1378                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1379                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1380                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1381                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1382                 }
1383         }
1384
1385         if (!vsi->max_macaddrs)
1386                 len = ETHER_ADDR_LEN;
1387         else
1388                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1389
1390         /* Must be done after the VSI is initialized */
1391         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1392         if (!dev->data->mac_addrs) {
1393                 PMD_INIT_LOG(ERR, "Failed to allocate memory for MAC addresses");
1394                 ret = -ENOMEM;
1395                 goto err_mac_alloc;
1396         }
1397         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1398                                         &dev->data->mac_addrs[0]);
1399
1400         /* Init dcb to sw mode by default */
1401         ret = i40e_dcb_init_configure(dev, TRUE);
1402         if (ret != I40E_SUCCESS) {
1403                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1404                 pf->flags &= ~I40E_FLAG_DCB;
1405         }
1406         /* Update HW struct after DCB configuration */
1407         i40e_get_cap(hw);
1408
1409         /* initialize pf host driver to setup SRIOV resource if applicable */
1410         i40e_pf_host_init(dev);
1411
1412         /* register callback func to eal lib */
1413         rte_intr_callback_register(intr_handle,
1414                                    i40e_dev_interrupt_handler, dev);
1415
1416         /* configure and enable device interrupt */
1417         i40e_pf_config_irq0(hw, TRUE);
1418         i40e_pf_enable_irq0(hw);
1419
1420         /* enable uio intr after callback register */
1421         rte_intr_enable(intr_handle);
1422
1423         /* By default disable flexible payload in global configuration */
1424         if (!pf->support_multi_driver)
1425                 i40e_flex_payload_reg_set_default(hw);
1426
1427         /*
1428          * Add an ethertype filter to drop all flow control frames transmitted
1429          * from VSIs. By doing so, we stop VFs from sending PAUSE or PFC
1430          * frames to the wire.
1431          */
1432         i40e_add_tx_flow_control_drop_filter(pf);
1433
1434         /* Set the max frame size to 0x2600 by default,
1435          * in case other drivers changed the default value.
1436          */
1437         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1438
1439         /* initialize mirror rule list */
1440         TAILQ_INIT(&pf->mirror_list);
1441
1442         /* initialize Traffic Manager configuration */
1443         i40e_tm_conf_init(dev);
1444
1445         /* Initialize customized information */
1446         i40e_init_customized_info(pf);
1447
1448         ret = i40e_init_ethtype_filter_list(dev);
1449         if (ret < 0)
1450                 goto err_init_ethtype_filter_list;
1451         ret = i40e_init_tunnel_filter_list(dev);
1452         if (ret < 0)
1453                 goto err_init_tunnel_filter_list;
1454         ret = i40e_init_fdir_filter_list(dev);
1455         if (ret < 0)
1456                 goto err_init_fdir_filter_list;
1457
1458         /* initialize queue region configuration */
1459         i40e_init_queue_region_conf(dev);
1460
1461         /* initialize rss configuration from rte_flow */
1462         memset(&pf->rss_info, 0,
1463                 sizeof(struct i40e_rte_flow_rss_conf));
1464
1465         return 0;
1466
1467 err_init_fdir_filter_list:
1468         rte_free(pf->tunnel.hash_table);
1469         rte_free(pf->tunnel.hash_map);
1470 err_init_tunnel_filter_list:
1471         rte_free(pf->ethertype.hash_table);
1472         rte_free(pf->ethertype.hash_map);
1473 err_init_ethtype_filter_list:
1474         rte_free(dev->data->mac_addrs);
1475 err_mac_alloc:
1476         i40e_vsi_release(pf->main_vsi);
1477 err_setup_pf_switch:
1478 err_get_mac_addr:
1479 err_configure_lan_hmc:
1480         (void)i40e_shutdown_lan_hmc(hw);
1481 err_init_lan_hmc:
1482         i40e_res_pool_destroy(&pf->msix_pool);
1483 err_msix_pool_init:
1484         i40e_res_pool_destroy(&pf->qp_pool);
1485 err_qp_pool_init:
1486 err_parameter_init:
1487 err_get_capabilities:
1488         (void)i40e_shutdown_adminq(hw);
1489
1490         return ret;
1491 }
1492
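/*
 * The three helpers below free the software state of the ethertype,
 * tunnel and flow-director filter lists: each releases the rte_hash map
 * and table, then walks the TAILQ and frees every remaining rule.
 */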
1493 static void
1494 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1495 {
1496         struct i40e_ethertype_filter *p_ethertype;
1497         struct i40e_ethertype_rule *ethertype_rule;
1498
1499         ethertype_rule = &pf->ethertype;
1500         /* Remove all ethertype filter rules and hash */
1501         if (ethertype_rule->hash_map)
1502                 rte_free(ethertype_rule->hash_map);
1503         if (ethertype_rule->hash_table)
1504                 rte_hash_free(ethertype_rule->hash_table);
1505
1506         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1507                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1508                              p_ethertype, rules);
1509                 rte_free(p_ethertype);
1510         }
1511 }
1512
1513 static void
1514 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1515 {
1516         struct i40e_tunnel_filter *p_tunnel;
1517         struct i40e_tunnel_rule *tunnel_rule;
1518
1519         tunnel_rule = &pf->tunnel;
1520         /* Remove all tunnel director rules and hash */
1521         if (tunnel_rule->hash_map)
1522                 rte_free(tunnel_rule->hash_map);
1523         if (tunnel_rule->hash_table)
1524                 rte_hash_free(tunnel_rule->hash_table);
1525
1526         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1527                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1528                 rte_free(p_tunnel);
1529         }
1530 }
1531
1532 static void
1533 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1534 {
1535         struct i40e_fdir_filter *p_fdir;
1536         struct i40e_fdir_info *fdir_info;
1537
1538         fdir_info = &pf->fdir;
1539         /* Remove all flow director rules and hash */
1540         if (fdir_info->hash_map)
1541                 rte_free(fdir_info->hash_map);
1542         if (fdir_info->hash_table)
1543                 rte_hash_free(fdir_info->hash_table);
1544
1545         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1546                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1547                 rte_free(p_fdir);
1548         }
1549 }
1550
1551 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1552 {
1553         /*
1554          * Disable by default flexible payload
1555          * for corresponding L2/L3/L4 layers.
1556          */
1557         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1558         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1559         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1560         i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
1561 }
1562
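/*
 * Tear-down mirrors init: stop and close the device, release host-driver
 * and MAC state, then unregister the interrupt callback with a retry
 * loop, since rte_intr_callback_unregister() returns -EAGAIN while the
 * handler is still active.
 */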
1563 static int
1564 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1565 {
1566         struct i40e_pf *pf;
1567         struct rte_pci_device *pci_dev;
1568         struct rte_intr_handle *intr_handle;
1569         struct i40e_hw *hw;
1570         struct i40e_filter_control_settings settings;
1571         struct rte_flow *p_flow;
1572         int ret;
1573         uint8_t aq_fail = 0;
1574         int retries = 0;
1575
1576         PMD_INIT_FUNC_TRACE();
1577
1578         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1579                 return 0;
1580
1581         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1582         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1583         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1584         intr_handle = &pci_dev->intr_handle;
1585
1586         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
1587         if (ret)
1588                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
1589
1590         if (hw->adapter_stopped == 0)
1591                 i40e_dev_close(dev);
1592
1593         dev->dev_ops = NULL;
1594         dev->rx_pkt_burst = NULL;
1595         dev->tx_pkt_burst = NULL;
1596
1597         /* Clear PXE mode */
1598         i40e_clear_pxe_mode(hw);
1599
1600         /* Unconfigure filter control */
1601         memset(&settings, 0, sizeof(settings));
1602         ret = i40e_set_filter_control(hw, &settings);
1603         if (ret)
1604                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1605                                         ret);
1606
1607         /* Disable flow control */
1608         hw->fc.requested_mode = I40E_FC_NONE;
1609         i40e_set_fc(hw, &aq_fail, TRUE);
1610
1611         /* uninitialize pf host driver */
1612         i40e_pf_host_uninit(dev);
1613
1614         rte_free(dev->data->mac_addrs);
1615         dev->data->mac_addrs = NULL;
1616
1617         /* disable uio intr before callback unregister */
1618         rte_intr_disable(intr_handle);
1619
1620         /* unregister callback func to eal lib */
1621         do {
1622                 ret = rte_intr_callback_unregister(intr_handle,
1623                                 i40e_dev_interrupt_handler, dev);
1624                 if (ret >= 0) {
1625                         break;
1626                 } else if (ret != -EAGAIN) {
1627                         PMD_INIT_LOG(ERR,
1628                                  "intr callback unregister failed: %d",
1629                                  ret);
1630                         return ret;
1631                 }
1632                 i40e_msec_delay(500);
1633         } while (retries++ < 5);
1634
1635         i40e_rm_ethtype_filter_list(pf);
1636         i40e_rm_tunnel_filter_list(pf);
1637         i40e_rm_fdir_filter_list(pf);
1638
1639         /* Remove all flows */
1640         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1641                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1642                 rte_free(p_flow);
1643         }
1644
1645         /* Remove all Traffic Manager configuration */
1646         i40e_tm_conf_uninit(dev);
1647
1648         return 0;
1649 }
1650
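/*
 * Flow director setup below is driven by the application's port
 * configuration. An illustrative application-side sketch (the queue
 * count names are placeholders):
 *
 *     struct rte_eth_conf conf = { 0 };
 *     conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *     ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */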
1651 static int
1652 i40e_dev_configure(struct rte_eth_dev *dev)
1653 {
1654         struct i40e_adapter *ad =
1655                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1657         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1658         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1659         int i, ret;
1660
1661         ret = i40e_dev_sync_phy_type(hw);
1662         if (ret)
1663                 return ret;
1664
1665         /* Initialize to TRUE. If any Rx queue fails to meet the bulk
1666          * allocation or vector Rx preconditions, the flag will be reset.
1667          */
1668         ad->rx_bulk_alloc_allowed = true;
1669         ad->rx_vec_allowed = true;
1670         ad->tx_simple_allowed = true;
1671         ad->tx_vec_allowed = true;
1672
1673         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1674                 ret = i40e_fdir_setup(pf);
1675                 if (ret != I40E_SUCCESS) {
1676                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1677                         return -ENOTSUP;
1678                 }
1679                 ret = i40e_fdir_configure(dev);
1680                 if (ret < 0) {
1681                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1682                         goto err;
1683                 }
1684         } else
1685                 i40e_fdir_teardown(pf);
1686
1687         ret = i40e_dev_init_vlan(dev);
1688         if (ret < 0)
1689                 goto err;
1690
1691         /* VMDQ setup.
1692          *  VMDQ configuration has to be done outside i40e_pf_config_mq_rx()
1693          *  because VMDQ and RSS settings have different requirements.
1694          *  The general PMD call sequence is NIC init, configure,
1695          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
1696          *  VSI that a specific queue belongs to when VMDQ is applicable, so
1697          *  the VMDQ setup has to happen before rx/tx_queue_setup(), and this
1698          *  function is a good place for vmdq_setup.
1699          *  The RSS setup needs the actual number of configured RX queues,
1700          *  which is only available after rx_queue_setup(), so dev_start()
1701          *  is a good place for the RSS setup.
1702          */
1703         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1704                 ret = i40e_vmdq_setup(dev);
1705                 if (ret)
1706                         goto err;
1707         }
1708
1709         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1710                 ret = i40e_dcb_setup(dev);
1711                 if (ret) {
1712                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1713                         goto err_dcb;
1714                 }
1715         }
1716
1717         TAILQ_INIT(&pf->flow_list);
1718
1719         return 0;
1720
1721 err_dcb:
1722         /* need to release vmdq resource if exists */
1723         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1724                 i40e_vsi_release(pf->vmdq[i].vsi);
1725                 pf->vmdq[i].vsi = NULL;
1726         }
1727         rte_free(pf->vmdq);
1728         pf->vmdq = NULL;
1729 err:
1730         /* need to release fdir resource if exists */
1731         i40e_fdir_teardown(pf);
1732         return ret;
1733 }
1734
1735 void
1736 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1737 {
1738         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1739         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1740         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1741         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1742         uint16_t msix_vect = vsi->msix_intr;
1743         uint16_t i;
1744
1745         for (i = 0; i < vsi->nb_qps; i++) {
1746                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1747                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1748                 rte_wmb();
1749         }
1750
1751         if (vsi->type != I40E_VSI_SRIOV) {
1752                 if (!rte_intr_allow_others(intr_handle)) {
1753                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1754                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1755                         I40E_WRITE_REG(hw,
1756                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1757                                        0);
1758                 } else {
1759                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1760                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1761                         I40E_WRITE_REG(hw,
1762                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1763                                                        msix_vect - 1), 0);
1764                 }
1765         } else {
1766                 uint32_t reg;
1767                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1768                         vsi->user_param + (msix_vect - 1);
1769
1770                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1771                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1772         }
1773         I40E_WRITE_FLUSH(hw);
1774 }
1775
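/*
 * Bind a contiguous queue range to one MSI-X vector: the QINT_RQCTL
 * registers are chained via NEXTQ_INDX so the vector walks the whole
 * range, and the first queue is written to the matching link-list head
 * register (PF vector 0, PF vector N, or the VF variants).
 */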
1776 static void
1777 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1778                        int base_queue, int nb_queue,
1779                        uint16_t itr_idx)
1780 {
1781         int i;
1782         uint32_t val;
1783         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1784         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1785
1786         /* Bind all RX queues to allocated MSIX interrupt */
1787         for (i = 0; i < nb_queue; i++) {
1788                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1789                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1790                         ((base_queue + i + 1) <<
1791                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1792                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1793                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1794
1795                 if (i == nb_queue - 1)
1796                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1797                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1798         }
1799
1800         /* Write first RX queue to Link list register as the head element */
1801         if (vsi->type != I40E_VSI_SRIOV) {
1802                 uint16_t interval =
1803                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
1804                                                pf->support_multi_driver);
1805
1806                 if (msix_vect == I40E_MISC_VEC_ID) {
1807                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1808                                        (base_queue <<
1809                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1810                                        (0x0 <<
1811                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1812                         I40E_WRITE_REG(hw,
1813                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1814                                        interval);
1815                 } else {
1816                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1817                                        (base_queue <<
1818                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1819                                        (0x0 <<
1820                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1821                         I40E_WRITE_REG(hw,
1822                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1823                                                        msix_vect - 1),
1824                                        interval);
1825                 }
1826         } else {
1827                 uint32_t reg;
1828
1829                 if (msix_vect == I40E_MISC_VEC_ID) {
1830                         I40E_WRITE_REG(hw,
1831                                        I40E_VPINT_LNKLST0(vsi->user_param),
1832                                        (base_queue <<
1833                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1834                                        (0x0 <<
1835                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1836                 } else {
1837                         /* num_msix_vectors_vf excludes IRQ0, hence the minus one */
1838                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1839                                 vsi->user_param + (msix_vect - 1);
1840
1841                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1842                                        (base_queue <<
1843                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1844                                        (0x0 <<
1845                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1846                 }
1847         }
1848
1849         I40E_WRITE_FLUSH(hw);
1850 }
1851
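/*
 * Distribute the VSI's queues over the available MSI-X vectors: with
 * enough vectors, queues and vectors map 1:1; otherwise the remaining
 * queues are all chained onto one (possibly shared) vector.
 */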
1852 void
1853 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1854 {
1855         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1856         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1857         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1858         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1859         uint16_t msix_vect = vsi->msix_intr;
1860         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1861         uint16_t queue_idx = 0;
1862         int record = 0;
1863         int i;
1864
1865         for (i = 0; i < vsi->nb_qps; i++) {
1866                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1867                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1868         }
1869
1870         /* VF bind interrupt */
1871         if (vsi->type == I40E_VSI_SRIOV) {
1872                 __vsi_queues_bind_intr(vsi, msix_vect,
1873                                        vsi->base_queue, vsi->nb_qps,
1874                                        itr_idx);
1875                 return;
1876         }
1877
1878         /* PF & VMDq bind interrupt */
1879         if (rte_intr_dp_is_en(intr_handle)) {
1880                 if (vsi->type == I40E_VSI_MAIN) {
1881                         queue_idx = 0;
1882                         record = 1;
1883                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1884                         struct i40e_vsi *main_vsi =
1885                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1886                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1887                         record = 1;
1888                 }
1889         }
1890
1891         for (i = 0; i < vsi->nb_used_qps; i++) {
1892                 if (nb_msix <= 1) {
1893                         if (!rte_intr_allow_others(intr_handle))
1894                                 /* allow to share MISC_VEC_ID */
1895                                 /* allow sharing MISC_VEC_ID */
1896
1897                         /* no enough msix_vect, map all to one */
1898                         /* not enough MSI-X vectors, map all remaining queues to one */
1899                                                vsi->base_queue + i,
1900                                                vsi->nb_used_qps - i,
1901                                                itr_idx);
1902                         for (; !!record && i < vsi->nb_used_qps; i++)
1903                                 intr_handle->intr_vec[queue_idx + i] =
1904                                         msix_vect;
1905                         break;
1906                 }
1907                 /* 1:1 queue/msix_vect mapping */
1908                 __vsi_queues_bind_intr(vsi, msix_vect,
1909                                        vsi->base_queue + i, 1,
1910                                        itr_idx);
1911                 if (!!record)
1912                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1913
1914                 msix_vect++;
1915                 nb_msix--;
1916         }
1917 }
1918
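/*
 * The enable/disable pair below arms and disarms the queue vectors via
 * the DYN_CTL registers: data-path vectors use DYN_CTLN(vector - 1),
 * while vector 0, shared with the misc interrupt, uses DYN_CTL0.
 */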
1919 static void
1920 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1921 {
1922         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1923         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1924         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1925         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1926         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1927         uint16_t msix_intr, i;
1928
1929         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1930                 for (i = 0; i < vsi->nb_msix; i++) {
1931                         msix_intr = vsi->msix_intr + i;
1932                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1933                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1934                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1935                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1936                 }
1937         else
1938                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1939                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1940                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1941                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1942
1943         I40E_WRITE_FLUSH(hw);
1944 }
1945
1946 static void
1947 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1948 {
1949         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1950         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1951         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1952         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1953         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1954         uint16_t msix_intr, i;
1955
1956         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1957                 for (i = 0; i < vsi->nb_msix; i++) {
1958                         msix_intr = vsi->msix_intr + i;
1959                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1960                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1961                 }
1962         else
1963                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1964                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1965
1966         I40E_WRITE_FLUSH(hw);
1967 }
1968
1969 static inline uint8_t
1970 i40e_parse_link_speeds(uint16_t link_speeds)
1971 {
1972         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1973
1974         if (link_speeds & ETH_LINK_SPEED_40G)
1975                 link_speed |= I40E_LINK_SPEED_40GB;
1976         if (link_speeds & ETH_LINK_SPEED_25G)
1977                 link_speed |= I40E_LINK_SPEED_25GB;
1978         if (link_speeds & ETH_LINK_SPEED_20G)
1979                 link_speed |= I40E_LINK_SPEED_20GB;
1980         if (link_speeds & ETH_LINK_SPEED_10G)
1981                 link_speed |= I40E_LINK_SPEED_10GB;
1982         if (link_speeds & ETH_LINK_SPEED_1G)
1983                 link_speed |= I40E_LINK_SPEED_1GB;
1984         if (link_speeds & ETH_LINK_SPEED_100M)
1985                 link_speed |= I40E_LINK_SPEED_100MB;
1986
1987         return link_speed;
1988 }
1989
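/*
 * Read the current PHY abilities and write back a new configuration:
 * keep the pause/low-power bits reported by firmware, advertise all
 * speeds when autonegotiation is on, and otherwise force the requested
 * speed (or keep the current one when bringing the link down).
 */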
1990 static int
1991 i40e_phy_conf_link(struct i40e_hw *hw,
1992                    uint8_t abilities,
1993                    uint8_t force_speed,
1994                    bool is_up)
1995 {
1996         enum i40e_status_code status;
1997         struct i40e_aq_get_phy_abilities_resp phy_ab;
1998         struct i40e_aq_set_phy_config phy_conf;
1999         enum i40e_aq_phy_type cnt;
2000         uint32_t phy_type_mask = 0;
2001
2002         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2003                         I40E_AQ_PHY_FLAG_PAUSE_RX |
2005                         I40E_AQ_PHY_FLAG_LOW_POWER;
2006         const uint8_t advt = I40E_LINK_SPEED_40GB |
2007                         I40E_LINK_SPEED_25GB |
2008                         I40E_LINK_SPEED_10GB |
2009                         I40E_LINK_SPEED_1GB |
2010                         I40E_LINK_SPEED_100MB;
2011         int ret = -ENOTSUP;
2012
2014         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2015                                               NULL);
2016         if (status)
2017                 return ret;
2018
2019         /* If link already up, no need to set up again */
2020         if (is_up && phy_ab.phy_type != 0)
2021                 return I40E_SUCCESS;
2022
2023         memset(&phy_conf, 0, sizeof(phy_conf));
2024
2025         /* bits 0-2 use the values from get_phy_abilities_resp */
2026         abilities &= ~mask;
2027         abilities |= phy_ab.abilities & mask;
2028
2029         /* update abilities and speed */
2030         if (abilities & I40E_AQ_PHY_AN_ENABLED)
2031                 phy_conf.link_speed = advt;
2032         else
2033                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
2034
2035         phy_conf.abilities = abilities;
2036
2039         /* To enable link, phy_type mask needs to include each type */
2040         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
2041                 phy_type_mask |= 1 << cnt;
2042
2043         /* use get_phy_abilities_resp value for the rest */
2044         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2045         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2046                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2047                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2048         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2049         phy_conf.eee_capability = phy_ab.eee_capability;
2050         phy_conf.eeer = phy_ab.eeer_val;
2051         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2052
2053         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2054                     phy_ab.abilities, phy_ab.link_speed);
2055         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2056                     phy_conf.abilities, phy_conf.link_speed);
2057
2058         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2059         if (status)
2060                 return ret;
2061
2062         return I40E_SUCCESS;
2063 }
2064
2065 static int
2066 i40e_apply_link_speed(struct rte_eth_dev *dev)
2067 {
2068         uint8_t speed;
2069         uint8_t abilities = 0;
2070         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2071         struct rte_eth_conf *conf = &dev->data->dev_conf;
2072
2073         speed = i40e_parse_link_speeds(conf->link_speeds);
2074         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2075         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
2076                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2077         abilities |= I40E_AQ_PHY_LINK_ENABLED;
2078
2079         return i40e_phy_conf_link(hw, abilities, speed, true);
2080 }
2081
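/*
 * Start sequence, in brief: initialize the Rx/Tx rings, bind each VSI's
 * queues to MSI-X vectors, switch the queues on, enable broadcast and
 * VLAN promiscuous where needed, apply the link speed and re-arm the
 * interrupts; err_up switches the queues back off and clears them.
 * Per-queue Rx interrupts are used only when the application configured
 * intr_conf.rxq before rte_eth_dev_configure().
 */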
2082 static int
2083 i40e_dev_start(struct rte_eth_dev *dev)
2084 {
2085         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2086         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2087         struct i40e_vsi *main_vsi = pf->main_vsi;
2088         int ret, i;
2089         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2090         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2091         uint32_t intr_vector = 0;
2092         struct i40e_vsi *vsi;
2093
2094         hw->adapter_stopped = 0;
2095
2096         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2097                 PMD_INIT_LOG(ERR,
2098                         "Invalid link_speeds for port %u: disabling autonegotiation is not supported",
2099                         dev->data->port_id);
2100                 return -EINVAL;
2101         }
2102
2103         rte_intr_disable(intr_handle);
2104
2105         if ((rte_intr_cap_multiple(intr_handle) ||
2106              !RTE_ETH_DEV_SRIOV(dev).active) &&
2107             dev->data->dev_conf.intr_conf.rxq != 0) {
2108                 intr_vector = dev->data->nb_rx_queues;
2109                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2110                 if (ret)
2111                         return ret;
2112         }
2113
2114         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2115                 intr_handle->intr_vec =
2116                         rte_zmalloc("intr_vec",
2117                                     dev->data->nb_rx_queues * sizeof(int),
2118                                     0);
2119                 if (!intr_handle->intr_vec) {
2120                         PMD_INIT_LOG(ERR,
2121                                 "Failed to allocate %d rx_queues intr_vec",
2122                                 dev->data->nb_rx_queues);
2123                         return -ENOMEM;
2124                 }
2125         }
2126
2127         /* Initialize VSI */
2128         ret = i40e_dev_rxtx_init(pf);
2129         if (ret != I40E_SUCCESS) {
2130                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2131                 goto err_up;
2132         }
2133
2134         /* Map queues with MSIX interrupt */
2135         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2136                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2137         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2138         i40e_vsi_enable_queues_intr(main_vsi);
2139
2140         /* Map VMDQ VSI queues with MSIX interrupt */
2141         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2142                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2143                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2144                                           I40E_ITR_INDEX_DEFAULT);
2145                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2146         }
2147
2148         /* enable FDIR MSIX interrupt */
2149         if (pf->fdir.fdir_vsi) {
2150                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2151                                           I40E_ITR_INDEX_NONE);
2152                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2153         }
2154
2155         /* Enable all queues which have been configured */
2156         ret = i40e_dev_switch_queues(pf, TRUE);
2157         if (ret != I40E_SUCCESS) {
2158                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2159                 goto err_up;
2160         }
2161
2162         /* Enable receiving broadcast packets */
2163         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2164         if (ret != I40E_SUCCESS)
2165                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2166
2167         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2168                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2169                                                 true, NULL);
2170                 if (ret != I40E_SUCCESS)
2171                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2172         }
2173
2174         /* Enable the VLAN promiscuous mode. */
2175         if (pf->vfs) {
2176                 for (i = 0; i < pf->vf_num; i++) {
2177                         vsi = pf->vfs[i].vsi;
2178                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2179                                                      true, NULL);
2180                 }
2181         }
2182
2183         /* Apply the requested loopback mode (none or local PHY loopback) */
2184         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2185             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2186                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2187                 if (ret != I40E_SUCCESS) {
2188                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2189                         goto err_up;
2190                 }
2191         }
2192
2193         /* Apply link configure */
2194         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2195                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2196                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2197                                 ETH_LINK_SPEED_40G)) {
2198                 PMD_DRV_LOG(ERR, "Invalid link setting");
2199                 goto err_up;
2200         }
2201         ret = i40e_apply_link_speed(dev);
2202         if (I40E_SUCCESS != ret) {
2203                 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2204                 goto err_up;
2205         }
2206
2207         if (!rte_intr_allow_others(intr_handle)) {
2208                 rte_intr_callback_unregister(intr_handle,
2209                                              i40e_dev_interrupt_handler,
2210                                              (void *)dev);
2211                 /* configure and enable device interrupt */
2212                 i40e_pf_config_irq0(hw, FALSE);
2213                 i40e_pf_enable_irq0(hw);
2214
2215                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2216                         PMD_INIT_LOG(INFO,
2217                                 "LSC interrupt not enabled: no interrupt multiplexing support");
2218         } else {
2219                 ret = i40e_aq_set_phy_int_mask(hw,
2220                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2221                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2222                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2223                 if (ret != I40E_SUCCESS)
2224                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2225
2226                 /* Call the get_link_info AQ command to enable/disable LSE */
2227                 i40e_dev_link_update(dev, 0);
2228         }
2229
2230         /* enable uio intr after callback register */
2231         rte_intr_enable(intr_handle);
2232
2233         i40e_filter_restore(pf);
2234
2235         if (pf->tm_conf.root && !pf->tm_conf.committed)
2236                 PMD_DRV_LOG(WARNING,
2237                             "please call hierarchy_commit() "
2238                             "before starting the port");
2239
2240         return I40E_SUCCESS;
2241
2242 err_up:
2243         i40e_dev_switch_queues(pf, FALSE);
2244         i40e_dev_clear_queues(dev);
2245
2246         return ret;
2247 }
2248
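/*
 * Stop is the inverse of start: disable and unbind the queue interrupts,
 * clear the queues, force the link down, restore the default interrupt
 * handler, and release the event fds and the queue/vector map.
 */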
2249 static void
2250 i40e_dev_stop(struct rte_eth_dev *dev)
2251 {
2252         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2253         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2254         struct i40e_vsi *main_vsi = pf->main_vsi;
2255         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2256         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2257         int i;
2258
2259         if (hw->adapter_stopped == 1)
2260                 return;
2261         /* Disable all queues */
2262         i40e_dev_switch_queues(pf, FALSE);
2263
2264         /* un-map queues with interrupt registers */
2265         i40e_vsi_disable_queues_intr(main_vsi);
2266         i40e_vsi_queues_unbind_intr(main_vsi);
2267
2268         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2269                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2270                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2271         }
2272
2273         if (pf->fdir.fdir_vsi) {
2274                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2275                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2276         }
2277         /* Clear all queues and release memory */
2278         i40e_dev_clear_queues(dev);
2279
2280         /* Set link down */
2281         i40e_dev_set_link_down(dev);
2282
2283         if (!rte_intr_allow_others(intr_handle))
2284                 /* resume the default interrupt handler */
2285                 rte_intr_callback_register(intr_handle,
2286                                            i40e_dev_interrupt_handler,
2287                                            (void *)dev);
2288
2289         /* Clean datapath event and queue/vec mapping */
2290         rte_intr_efd_disable(intr_handle);
2291         if (intr_handle->intr_vec) {
2292                 rte_free(intr_handle->intr_vec);
2293                 intr_handle->intr_vec = NULL;
2294         }
2295
2296         /* reset hierarchy commit */
2297         pf->tm_conf.committed = false;
2298
2299         hw->adapter_stopped = 1;
2300 }
2301
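/*
 * Close releases what stop leaves behind: mirror rules, queue memory,
 * the LAN HMC, all VSIs and VEBs, the admin queue and the resource
 * pools; finally it requests a PF software reset so no stale state is
 * left for the next driver instance.
 */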
2302 static void
2303 i40e_dev_close(struct rte_eth_dev *dev)
2304 {
2305         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2306         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2307         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2308         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2309         struct i40e_mirror_rule *p_mirror;
2310         uint32_t reg;
2311         int i;
2312         int ret;
2313
2314         PMD_INIT_FUNC_TRACE();
2315
2316         i40e_dev_stop(dev);
2317
2318         /* Remove all mirror rules */
2319         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2320                 ret = i40e_aq_del_mirror_rule(hw,
2321                                               pf->main_vsi->veb->seid,
2322                                               p_mirror->rule_type,
2323                                               p_mirror->entries,
2324                                               p_mirror->num_entries,
2325                                               p_mirror->id);
2326                 if (ret < 0)
2327                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2328                                     "status = %d, aq_err = %d.", ret,
2329                                     hw->aq.asq_last_status);
2330
2331                 /* remove mirror software resource anyway */
2332                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2333                 rte_free(p_mirror);
2334                 pf->nb_mirror_rule--;
2335         }
2336
2337         i40e_dev_free_queues(dev);
2338
2339         /* Disable interrupt */
2340         i40e_pf_disable_irq0(hw);
2341         rte_intr_disable(intr_handle);
2342
2343         /* shutdown and destroy the HMC */
2344         i40e_shutdown_lan_hmc(hw);
2345
2346         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2347                 i40e_vsi_release(pf->vmdq[i].vsi);
2348                 pf->vmdq[i].vsi = NULL;
2349         }
2350         rte_free(pf->vmdq);
2351         pf->vmdq = NULL;
2352
2353         /* release all the existing VSIs and VEBs */
2354         i40e_fdir_teardown(pf);
2355         i40e_vsi_release(pf->main_vsi);
2356
2357         /* shutdown the adminq */
2358         i40e_aq_queue_shutdown(hw, true);
2359         i40e_shutdown_adminq(hw);
2360
2361         i40e_res_pool_destroy(&pf->qp_pool);
2362         i40e_res_pool_destroy(&pf->msix_pool);
2363
2364         /* Disable flexible payload in global configuration */
2365         if (!pf->support_multi_driver)
2366                 i40e_flex_payload_reg_set_default(hw);
2367
2368         /* force a PF reset to clean anything leftover */
2369         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2370         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2371                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2372         I40E_WRITE_FLUSH(hw);
2373 }
2374
2375 /*
2376  * Reset PF device only to re-initialize resources in PMD layer
2377  */
2378 static int
2379 i40e_dev_reset(struct rte_eth_dev *dev)
2380 {
2381         int ret;
2382
2383         /* When a DPDK PMD PF begin to reset PF port, it should notify all
2384          * its VF to make them align with it. The detailed notification
2385          * mechanism is PMD specific. As to i40e PF, it is rather complex.
2386          * To avoid unexpected behavior in VF, currently reset of PF with
2387          * SR-IOV activation is not supported. It might be supported later.
2388          */
2389         if (dev->data->sriov.active)
2390                 return -ENOTSUP;
2391
2392         ret = eth_i40e_dev_uninit(dev);
2393         if (ret)
2394                 return ret;
2395
2396         ret = eth_i40e_dev_init(dev, NULL);
2397
2398         return ret;
2399 }
2400
2401 static void
2402 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2403 {
2404         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2405         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2406         struct i40e_vsi *vsi = pf->main_vsi;
2407         int status;
2408
2409         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2410                                                      true, NULL, true);
2411         if (status != I40E_SUCCESS)
2412                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2413
2414         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2415                                                         TRUE, NULL);
2416         if (status != I40E_SUCCESS)
2417                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2418
2419 }
2420
2421 static void
2422 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2423 {
2424         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2425         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2426         struct i40e_vsi *vsi = pf->main_vsi;
2427         int status;
2428
2429         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2430                                                      false, NULL, true);
2431         if (status != I40E_SUCCESS)
2432                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2433
2434         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2435                                                         false, NULL);
2436         if (status != I40E_SUCCESS)
2437                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2438 }
2439
2440 static void
2441 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2442 {
2443         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2444         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2445         struct i40e_vsi *vsi = pf->main_vsi;
2446         int ret;
2447
2448         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2449         if (ret != I40E_SUCCESS)
2450                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2451 }
2452
2453 static void
2454 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2455 {
2456         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2457         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2458         struct i40e_vsi *vsi = pf->main_vsi;
2459         int ret;
2460
2461         if (dev->data->promiscuous == 1)
2462                 return; /* must remain in all_multicast mode */
2463
2464         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2465                                 vsi->seid, FALSE, NULL);
2466         if (ret != I40E_SUCCESS)
2467                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2468 }
2469
2470 /*
2471  * Set device link up.
2472  */
2473 static int
2474 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2475 {
2476         /* re-apply link speed setting */
2477         return i40e_apply_link_speed(dev);
2478 }
2479
2480 /*
2481  * Set device link down.
2482  */
2483 static int
2484 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2485 {
2486         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2487         uint8_t abilities = 0;
2488         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2489
2490         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2491         return i40e_phy_conf_link(hw, abilities, speed, false);
2492 }
2493
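/*
 * Fast link query: read the link state straight from the PRTMAC link
 * status register instead of going through the admin queue, then decode
 * the speed field; the 25G and 40G encodings overlap and are told apart
 * via the PRTMAC_MACC register.
 */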
2494 static __rte_always_inline void
2495 update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
2496 {
2497 /* Link status registers and values */
2498 #define I40E_PRTMAC_LINKSTA             0x001E2420
2499 #define I40E_REG_LINK_UP                0x40000080
2500 #define I40E_PRTMAC_MACC                0x001E24E0
2501 #define I40E_REG_MACC_25GB              0x00020000
2502 #define I40E_REG_SPEED_MASK             0x38000000
2503 #define I40E_REG_SPEED_100MB            0x00000000
2504 #define I40E_REG_SPEED_1GB              0x08000000
2505 #define I40E_REG_SPEED_10GB             0x10000000
2506 #define I40E_REG_SPEED_20GB             0x20000000
2507 #define I40E_REG_SPEED_25_40GB          0x18000000
2508         uint32_t link_speed;
2509         uint32_t reg_val;
2510
2511         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2512         link_speed = reg_val & I40E_REG_SPEED_MASK;
2513         reg_val &= I40E_REG_LINK_UP;
2514         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2515
2516         if (unlikely(link->link_status == 0))
2517                 return;
2518
2519         /* Parse the link status */
2520         switch (link_speed) {
2521         case I40E_REG_SPEED_100MB:
2522                 link->link_speed = ETH_SPEED_NUM_100M;
2523                 break;
2524         case I40E_REG_SPEED_1GB:
2525                 link->link_speed = ETH_SPEED_NUM_1G;
2526                 break;
2527         case I40E_REG_SPEED_10GB:
2528                 link->link_speed = ETH_SPEED_NUM_10G;
2529                 break;
2530         case I40E_REG_SPEED_20GB:
2531                 link->link_speed = ETH_SPEED_NUM_20G;
2532                 break;
2533         case I40E_REG_SPEED_25_40GB:
2534                 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2535
2536                 if (reg_val & I40E_REG_MACC_25GB)
2537                         link->link_speed = ETH_SPEED_NUM_25G;
2538                 else
2539                         link->link_speed = ETH_SPEED_NUM_40G;
2540
2541                 break;
2542         default:
2543                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2544                 break;
2545         }
2546 }
2547
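/*
 * Polled link query via the admin queue: retry up to MAX_REPEAT_TIME
 * times (1s total) waiting for link-up before decoding the reported
 * speed; enable_lse piggybacks link-status-event enablement on the
 * get_link_info command.
 */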
2548 static __rte_always_inline void
2549 update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
2550         bool enable_lse)
2551 {
2552 #define CHECK_INTERVAL             100  /* 100ms */
2553 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2554         uint32_t rep_cnt = MAX_REPEAT_TIME;
2555         struct i40e_link_status link_status;
2556         int status;
2557
2558         memset(&link_status, 0, sizeof(link_status));
2559
2560         do {
2561                 memset(&link_status, 0, sizeof(link_status));
2562
2563                 /* Get link status information from hardware */
2564                 status = i40e_aq_get_link_info(hw, enable_lse,
2565                                                 &link_status, NULL);
2566                 if (unlikely(status != I40E_SUCCESS)) {
2567                         link->link_speed = ETH_SPEED_NUM_100M;
2568                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2569                         PMD_DRV_LOG(ERR, "Failed to get link info");
2570                         return;
2571                 }
2572
2573                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2574                 if (unlikely(link->link_status != 0))
2575                         break;
2576
2577                 rte_delay_ms(CHECK_INTERVAL);
2578         } while (--rep_cnt);
2579
2580         /* Parse the link speed */
2581         switch (link_status.link_speed) {
2582         case I40E_LINK_SPEED_100MB:
2583                 link->link_speed = ETH_SPEED_NUM_100M;
2584                 break;
2585         case I40E_LINK_SPEED_1GB:
2586                 link->link_speed = ETH_SPEED_NUM_1G;
2587                 break;
2588         case I40E_LINK_SPEED_10GB:
2589                 link->link_speed = ETH_SPEED_NUM_10G;
2590                 break;
2591         case I40E_LINK_SPEED_20GB:
2592                 link->link_speed = ETH_SPEED_NUM_20G;
2593                 break;
2594         case I40E_LINK_SPEED_25GB:
2595                 link->link_speed = ETH_SPEED_NUM_25G;
2596                 break;
2597         case I40E_LINK_SPEED_40GB:
2598                 link->link_speed = ETH_SPEED_NUM_40G;
2599                 break;
2600         default:
2601                 link->link_speed = ETH_SPEED_NUM_100M;
2602                 break;
2603         }
2604 }
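
/*
 * The loop above is a bounded-polling pattern: query, and if the condition
 * is not met yet, sleep CHECK_INTERVAL ms and retry, at most MAX_REPEAT_TIME
 * times. A generic, hypothetical sketch of the same idea:
 */
static __rte_unused bool
example_poll_until(bool (*cond)(void *), void *arg,
		   uint32_t interval_ms, uint32_t max_tries)
{
	while (max_tries--) {
		if (cond(arg))
			return true;	/* condition met before the deadline */
		rte_delay_ms(interval_ms);
	}
	return false;			/* timed out */
}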
2605
2606 int
2607 i40e_dev_link_update(struct rte_eth_dev *dev,
2608                      int wait_to_complete)
2609 {
2610         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2611         struct rte_eth_link link;
2612         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2613         int ret;
2614
2615         memset(&link, 0, sizeof(link));
2616
2617         /* i40e uses full duplex only */
2618         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2619         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2620                         ETH_LINK_SPEED_FIXED);
2621
2622         if (!wait_to_complete)
2623                 update_link_no_wait(hw, &link);
2624         else
2625                 update_link_wait(hw, &link, enable_lse);
2626
2627         ret = rte_eth_linkstatus_set(dev, &link);
2628         i40e_notify_all_vfs_link_status(dev);
2629
2630         return ret;
2631 }
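
/*
 * Illustrative application-side usage (hypothetical snippet): the
 * wait_to_complete argument selects between the two helpers above.
 * rte_eth_link_get_nowait() maps to wait_to_complete = 0 and reads the
 * registers once; rte_eth_link_get() maps to wait_to_complete = 1 and may
 * block for up to a second.
 */
static __rte_unused void
example_query_link(uint16_t port_id)
{
	struct rte_eth_link link_info;

	memset(&link_info, 0, sizeof(link_info));
	rte_eth_link_get_nowait(port_id, &link_info);
	if (!link_info.link_status)
		rte_eth_link_get(port_id, &link_info);
	printf("port %u: link %s, %u Mbps\n", port_id,
	       link_info.link_status ? "up" : "down", link_info.link_speed);
}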
2632
2633 /* Get all the statistics of a VSI */
2634 void
2635 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2636 {
2637         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2638         struct i40e_eth_stats *nes = &vsi->eth_stats;
2639         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2640         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2641
2642         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2643                             vsi->offset_loaded, &oes->rx_bytes,
2644                             &nes->rx_bytes);
2645         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2646                             vsi->offset_loaded, &oes->rx_unicast,
2647                             &nes->rx_unicast);
2648         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2649                             vsi->offset_loaded, &oes->rx_multicast,
2650                             &nes->rx_multicast);
2651         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2652                             vsi->offset_loaded, &oes->rx_broadcast,
2653                             &nes->rx_broadcast);
2654         /* exclude CRC bytes */
2655         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2656                 nes->rx_broadcast) * ETHER_CRC_LEN;
2657
2658         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2659                             &oes->rx_discards, &nes->rx_discards);
2660         /* GLV_REPC not supported */
2661         /* GLV_RMPC not supported */
2662         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2663                             &oes->rx_unknown_protocol,
2664                             &nes->rx_unknown_protocol);
2665         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2666                             vsi->offset_loaded, &oes->tx_bytes,
2667                             &nes->tx_bytes);
2668         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2669                             vsi->offset_loaded, &oes->tx_unicast,
2670                             &nes->tx_unicast);
2671         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2672                             vsi->offset_loaded, &oes->tx_multicast,
2673                             &nes->tx_multicast);
2674         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2675                             vsi->offset_loaded,  &oes->tx_broadcast,
2676                             &nes->tx_broadcast);
2677         /* GLV_TDPC not supported */
2678         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2679                             &oes->tx_errors, &nes->tx_errors);
2680         vsi->offset_loaded = true;
2681
2682         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2683                     vsi->vsi_id);
2684         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2685         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2686         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2687         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2688         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2689         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2690                     nes->rx_unknown_protocol);
2691         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2692         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2693         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2694         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2695         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2696         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2697         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2698                     vsi->vsi_id);
2699 }
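
/*
 * Sketch of the offset scheme used above, under the assumption that the
 * hardware counters are free-running and never cleared: the first read is
 * latched as an offset, and later reads report (current - offset), with wrap
 * handling for the 48-bit counters. A simplified, hypothetical model of what
 * i40e_stat_update_48() does:
 */
static __rte_unused void
example_stat_update_48(uint64_t new_raw, bool offset_loaded,
		       uint64_t *offset, uint64_t *stat)
{
	if (!offset_loaded)
		*offset = new_raw;	/* first read becomes the baseline */
	if (new_raw >= *offset)
		*stat = new_raw - *offset;
	else				/* the 48-bit counter wrapped */
		*stat = new_raw + (1ULL << 48) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;	/* keep 48 bits */
}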
2700
2701 static void
2702 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2703 {
2704         unsigned int i;
2705         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2706         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2707
2708         /* Get rx/tx bytes of internal transfer packets */
2709         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2710                         I40E_GLV_GORCL(hw->port),
2711                         pf->offset_loaded,
2712                         &pf->internal_stats_offset.rx_bytes,
2713                         &pf->internal_stats.rx_bytes);
2714
2715         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2716                         I40E_GLV_GOTCL(hw->port),
2717                         pf->offset_loaded,
2718                         &pf->internal_stats_offset.tx_bytes,
2719                         &pf->internal_stats.tx_bytes);
2720         /* Get total internal rx packet count */
2721         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2722                             I40E_GLV_UPRCL(hw->port),
2723                             pf->offset_loaded,
2724                             &pf->internal_stats_offset.rx_unicast,
2725                             &pf->internal_stats.rx_unicast);
2726         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2727                             I40E_GLV_MPRCL(hw->port),
2728                             pf->offset_loaded,
2729                             &pf->internal_stats_offset.rx_multicast,
2730                             &pf->internal_stats.rx_multicast);
2731         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2732                             I40E_GLV_BPRCL(hw->port),
2733                             pf->offset_loaded,
2734                             &pf->internal_stats_offset.rx_broadcast,
2735                             &pf->internal_stats.rx_broadcast);
2736         /* Get total internal tx packet count */
2737         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2738                             I40E_GLV_UPTCL(hw->port),
2739                             pf->offset_loaded,
2740                             &pf->internal_stats_offset.tx_unicast,
2741                             &pf->internal_stats.tx_unicast);
2742         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2743                             I40E_GLV_MPTCL(hw->port),
2744                             pf->offset_loaded,
2745                             &pf->internal_stats_offset.tx_multicast,
2746                             &pf->internal_stats.tx_multicast);
2747         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2748                             I40E_GLV_BPTCL(hw->port),
2749                             pf->offset_loaded,
2750                             &pf->internal_stats_offset.tx_broadcast,
2751                             &pf->internal_stats.tx_broadcast);
2752
2753         /* exclude CRC size */
2754         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2755                 pf->internal_stats.rx_multicast +
2756                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2757
2758         /* Get statistics of struct i40e_eth_stats */
2759         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2760                             I40E_GLPRT_GORCL(hw->port),
2761                             pf->offset_loaded, &os->eth.rx_bytes,
2762                             &ns->eth.rx_bytes);
2763         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2764                             I40E_GLPRT_UPRCL(hw->port),
2765                             pf->offset_loaded, &os->eth.rx_unicast,
2766                             &ns->eth.rx_unicast);
2767         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2768                             I40E_GLPRT_MPRCL(hw->port),
2769                             pf->offset_loaded, &os->eth.rx_multicast,
2770                             &ns->eth.rx_multicast);
2771         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2772                             I40E_GLPRT_BPRCL(hw->port),
2773                             pf->offset_loaded, &os->eth.rx_broadcast,
2774                             &ns->eth.rx_broadcast);
2775         /* Workaround: CRC size should not be included in byte statistics,
2776          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2777          */
2778         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2779                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2780
2781         /* Exclude internal rx bytes.
2782          * Workaround: I40E_GLV_GORC[H/L] may be updated before
2783          * I40E_GLPRT_GORC[H/L], so there is a small window that can cause
2784          * a negative value.
2785          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2786          */
2787         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2788                 ns->eth.rx_bytes = 0;
2789         else
2790                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2791
2792         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2793                 ns->eth.rx_unicast = 0;
2794         else
2795                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2796
2797         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2798                 ns->eth.rx_multicast = 0;
2799         else
2800                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2801
2802         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2803                 ns->eth.rx_broadcast = 0;
2804         else
2805                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2806
2807         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2808                             pf->offset_loaded, &os->eth.rx_discards,
2809                             &ns->eth.rx_discards);
2810         /* GLPRT_REPC not supported */
2811         /* GLPRT_RMPC not supported */
2812         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2813                             pf->offset_loaded,
2814                             &os->eth.rx_unknown_protocol,
2815                             &ns->eth.rx_unknown_protocol);
2816         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2817                             I40E_GLPRT_GOTCL(hw->port),
2818                             pf->offset_loaded, &os->eth.tx_bytes,
2819                             &ns->eth.tx_bytes);
2820         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2821                             I40E_GLPRT_UPTCL(hw->port),
2822                             pf->offset_loaded, &os->eth.tx_unicast,
2823                             &ns->eth.tx_unicast);
2824         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2825                             I40E_GLPRT_MPTCL(hw->port),
2826                             pf->offset_loaded, &os->eth.tx_multicast,
2827                             &ns->eth.tx_multicast);
2828         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2829                             I40E_GLPRT_BPTCL(hw->port),
2830                             pf->offset_loaded, &os->eth.tx_broadcast,
2831                             &ns->eth.tx_broadcast);
2832         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2833                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2834
2835         /* Exclude internal tx bytes.
2836          * Workaround: I40E_GLV_GOTC[H/L] may be updated before
2837          * I40E_GLPRT_GOTC[H/L], so there is a small window that can cause
2838          * a negative value.
2839          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2840          */
2841         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2842                 ns->eth.tx_bytes = 0;
2843         else
2844                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2845
2846         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2847                 ns->eth.tx_unicast = 0;
2848         else
2849                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2850
2851         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2852                 ns->eth.tx_multicast = 0;
2853         else
2854                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2855
2856         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2857                 ns->eth.tx_broadcast = 0;
2858         else
2859                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2860
2861         /* GLPRT_TEPC not supported */
2862
2863         /* additional port specific stats */
2864         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2865                             pf->offset_loaded, &os->tx_dropped_link_down,
2866                             &ns->tx_dropped_link_down);
2867         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2868                             pf->offset_loaded, &os->crc_errors,
2869                             &ns->crc_errors);
2870         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2871                             pf->offset_loaded, &os->illegal_bytes,
2872                             &ns->illegal_bytes);
2873         /* GLPRT_ERRBC not supported */
2874         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2875                             pf->offset_loaded, &os->mac_local_faults,
2876                             &ns->mac_local_faults);
2877         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2878                             pf->offset_loaded, &os->mac_remote_faults,
2879                             &ns->mac_remote_faults);
2880         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2881                             pf->offset_loaded, &os->rx_length_errors,
2882                             &ns->rx_length_errors);
2883         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2884                             pf->offset_loaded, &os->link_xon_rx,
2885                             &ns->link_xon_rx);
2886         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2887                             pf->offset_loaded, &os->link_xoff_rx,
2888                             &ns->link_xoff_rx);
2889         for (i = 0; i < 8; i++) {
2890                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2891                                     pf->offset_loaded,
2892                                     &os->priority_xon_rx[i],
2893                                     &ns->priority_xon_rx[i]);
2894                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2895                                     pf->offset_loaded,
2896                                     &os->priority_xoff_rx[i],
2897                                     &ns->priority_xoff_rx[i]);
2898         }
2899         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2900                             pf->offset_loaded, &os->link_xon_tx,
2901                             &ns->link_xon_tx);
2902         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2903                             pf->offset_loaded, &os->link_xoff_tx,
2904                             &ns->link_xoff_tx);
2905         for (i = 0; i < 8; i++) {
2906                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2907                                     pf->offset_loaded,
2908                                     &os->priority_xon_tx[i],
2909                                     &ns->priority_xon_tx[i]);
2910                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2911                                     pf->offset_loaded,
2912                                     &os->priority_xoff_tx[i],
2913                                     &ns->priority_xoff_tx[i]);
2914                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2915                                     pf->offset_loaded,
2916                                     &os->priority_xon_2_xoff[i],
2917                                     &ns->priority_xon_2_xoff[i]);
2918         }
2919         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2920                             I40E_GLPRT_PRC64L(hw->port),
2921                             pf->offset_loaded, &os->rx_size_64,
2922                             &ns->rx_size_64);
2923         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2924                             I40E_GLPRT_PRC127L(hw->port),
2925                             pf->offset_loaded, &os->rx_size_127,
2926                             &ns->rx_size_127);
2927         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2928                             I40E_GLPRT_PRC255L(hw->port),
2929                             pf->offset_loaded, &os->rx_size_255,
2930                             &ns->rx_size_255);
2931         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2932                             I40E_GLPRT_PRC511L(hw->port),
2933                             pf->offset_loaded, &os->rx_size_511,
2934                             &ns->rx_size_511);
2935         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2936                             I40E_GLPRT_PRC1023L(hw->port),
2937                             pf->offset_loaded, &os->rx_size_1023,
2938                             &ns->rx_size_1023);
2939         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2940                             I40E_GLPRT_PRC1522L(hw->port),
2941                             pf->offset_loaded, &os->rx_size_1522,
2942                             &ns->rx_size_1522);
2943         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2944                             I40E_GLPRT_PRC9522L(hw->port),
2945                             pf->offset_loaded, &os->rx_size_big,
2946                             &ns->rx_size_big);
2947         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2948                             pf->offset_loaded, &os->rx_undersize,
2949                             &ns->rx_undersize);
2950         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2951                             pf->offset_loaded, &os->rx_fragments,
2952                             &ns->rx_fragments);
2953         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2954                             pf->offset_loaded, &os->rx_oversize,
2955                             &ns->rx_oversize);
2956         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2957                             pf->offset_loaded, &os->rx_jabber,
2958                             &ns->rx_jabber);
2959         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2960                             I40E_GLPRT_PTC64L(hw->port),
2961                             pf->offset_loaded, &os->tx_size_64,
2962                             &ns->tx_size_64);
2963         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2964                             I40E_GLPRT_PTC127L(hw->port),
2965                             pf->offset_loaded, &os->tx_size_127,
2966                             &ns->tx_size_127);
2967         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2968                             I40E_GLPRT_PTC255L(hw->port),
2969                             pf->offset_loaded, &os->tx_size_255,
2970                             &ns->tx_size_255);
2971         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2972                             I40E_GLPRT_PTC511L(hw->port),
2973                             pf->offset_loaded, &os->tx_size_511,
2974                             &ns->tx_size_511);
2975         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2976                             I40E_GLPRT_PTC1023L(hw->port),
2977                             pf->offset_loaded, &os->tx_size_1023,
2978                             &ns->tx_size_1023);
2979         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2980                             I40E_GLPRT_PTC1522L(hw->port),
2981                             pf->offset_loaded, &os->tx_size_1522,
2982                             &ns->tx_size_1522);
2983         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2984                             I40E_GLPRT_PTC9522L(hw->port),
2985                             pf->offset_loaded, &os->tx_size_big,
2986                             &ns->tx_size_big);
2987         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2988                            pf->offset_loaded,
2989                            &os->fd_sb_match, &ns->fd_sb_match);
2990         /* GLPRT_MSPDC not supported */
2991         /* GLPRT_XEC not supported */
2992
2993         pf->offset_loaded = true;
2994
2995         if (pf->main_vsi)
2996                 i40e_update_vsi_stats(pf->main_vsi);
2997 }
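
/*
 * The internal-VSI exclusion above repeats one saturating step: subtract, but
 * clamp at zero if the subtrahend raced ahead. A hypothetical helper
 * expressing the same pattern:
 *
 *	ns->eth.rx_bytes = example_sub_sat(ns->eth.rx_bytes,
 *					   pf->internal_stats.rx_bytes);
 */
static __rte_unused uint64_t
example_sub_sat(uint64_t a, uint64_t b)
{
	return (a < b) ? 0 : a - b;
}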
2998
2999 /* Get all statistics of a port */
3000 static int
3001 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3002 {
3003         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3004         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3005         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3006         unsigned i;
3007
3008         /* Reading the registers refreshes pf->stats; copy them into the struct */
3009         i40e_read_stats_registers(pf, hw);
3010
3011         stats->ipackets = ns->eth.rx_unicast +
3012                         ns->eth.rx_multicast +
3013                         ns->eth.rx_broadcast -
3014                         ns->eth.rx_discards -
3015                         pf->main_vsi->eth_stats.rx_discards;
3016         stats->opackets = ns->eth.tx_unicast +
3017                         ns->eth.tx_multicast +
3018                         ns->eth.tx_broadcast;
3019         stats->ibytes   = ns->eth.rx_bytes;
3020         stats->obytes   = ns->eth.tx_bytes;
3021         stats->oerrors  = ns->eth.tx_errors +
3022                         pf->main_vsi->eth_stats.tx_errors;
3023
3024         /* Rx Errors */
3025         stats->imissed  = ns->eth.rx_discards +
3026                         pf->main_vsi->eth_stats.rx_discards;
3027         stats->ierrors  = ns->crc_errors +
3028                         ns->rx_length_errors + ns->rx_undersize +
3029                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3030
3031         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3032         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3033         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3034         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3035         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3036         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3037         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3038                     ns->eth.rx_unknown_protocol);
3039         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3040         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3041         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3042         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3043         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3044         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3045
3046         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3047                     ns->tx_dropped_link_down);
3048         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3049         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3050                     ns->illegal_bytes);
3051         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3052         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3053                     ns->mac_local_faults);
3054         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3055                     ns->mac_remote_faults);
3056         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3057                     ns->rx_length_errors);
3058         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3059         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3060         for (i = 0; i < 8; i++) {
3061                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3062                                 i, ns->priority_xon_rx[i]);
3063                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3064                                 i, ns->priority_xoff_rx[i]);
3065         }
3066         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3067         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3068         for (i = 0; i < 8; i++) {
3069                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3070                                 i, ns->priority_xon_tx[i]);
3071                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3072                                 i, ns->priority_xoff_tx[i]);
3073                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3074                                 i, ns->priority_xon_2_xoff[i]);
3075         }
3076         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3077         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3078         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3079         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3080         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3081         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3082         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3083         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3084         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3085         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3086         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3087         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3088         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3089         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3090         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3091         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3092         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3093         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3094         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3095                         ns->mac_short_packet_dropped);
3096         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3097                     ns->checksum_error);
3098         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3099         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3100         return 0;
3101 }
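
/*
 * Illustrative application-side usage (hypothetical snippet) of the callback
 * above, reached through rte_eth_stats_get():
 */
static __rte_unused void
example_dump_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats st;

	if (rte_eth_stats_get(port_id, &st) != 0)
		return;
	printf("port %u: rx %"PRIu64" pkts / %"PRIu64" bytes, "
	       "missed %"PRIu64", errors %"PRIu64"\n",
	       port_id, st.ipackets, st.ibytes, st.imissed, st.ierrors);
}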
3102
3103 /* Reset the statistics */
3104 static void
3105 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3106 {
3107         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3108         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3109
3110         /* Mark PF and VSI stats to update the offset, aka "reset" */
3111         pf->offset_loaded = false;
3112         if (pf->main_vsi)
3113                 pf->main_vsi->offset_loaded = false;
3114
3115         /* read the stats, reading current register values into offset */
3116         i40e_read_stats_registers(pf, hw);
3117 }
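
/*
 * Note on the reset above: nothing is written to hardware. Re-latching the
 * offsets makes every later read report deltas from this point, so an
 * application calling rte_eth_stats_reset(port_id) effectively zeroes what
 * rte_eth_stats_get() returns next.
 */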
3118
3119 static uint32_t
3120 i40e_xstats_calc_num(void)
3121 {
3122         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3123                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3124                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3125 }
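
/*
 * Worked example with assumed counts (for illustration only): if there were
 * 10 eth stats, 40 port stats, and 3 rx- plus 3 tx-priority counters, the
 * total would be 10 + 40 + (3 * 8) + (3 * 8) = 98 entries, since each
 * per-priority counter is replicated across the 8 traffic-class priorities.
 */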
3126
3127 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3128                                      struct rte_eth_xstat_name *xstats_names,
3129                                      __rte_unused unsigned limit)
3130 {
3131         unsigned count = 0;
3132         unsigned i, prio;
3133
3134         if (xstats_names == NULL)
3135                 return i40e_xstats_calc_num();
3136
3137         /* Note: limit checked in rte_eth_xstats_names() */
3138
3139         /* Get stats from i40e_eth_stats struct */
3140         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3141                 snprintf(xstats_names[count].name,
3142                          sizeof(xstats_names[count].name),
3143                          "%s", rte_i40e_stats_strings[i].name);
3144                 count++;
3145         }
3146
3147         /* Get individual stats from the i40e_hw_port struct */
3148         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3149                 snprintf(xstats_names[count].name,
3150                         sizeof(xstats_names[count].name),
3151                          "%s", rte_i40e_hw_port_strings[i].name);
3152                 count++;
3153         }
3154
3155         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3156                 for (prio = 0; prio < 8; prio++) {
3157                         snprintf(xstats_names[count].name,
3158                                  sizeof(xstats_names[count].name),
3159                                  "rx_priority%u_%s", prio,
3160                                  rte_i40e_rxq_prio_strings[i].name);
3161                         count++;
3162                 }
3163         }
3164
3165         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3166                 for (prio = 0; prio < 8; prio++) {
3167                         snprintf(xstats_names[count].name,
3168                                  sizeof(xstats_names[count].name),
3169                                  "tx_priority%u_%s", prio,
3170                                  rte_i40e_txq_prio_strings[i].name);
3171                         count++;
3172                 }
3173         }
3174         return count;
3175 }
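
/*
 * Illustrative two-call pattern from the application side (hypothetical
 * snippet): call rte_eth_xstats_get_names() once with NULL to size the
 * array, then again to fill it.
 */
static __rte_unused void
example_list_xstat_names(uint16_t port_id)
{
	struct rte_eth_xstat_name *names;
	int i, nb;

	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;
	names = rte_malloc(NULL, sizeof(*names) * nb, 0);
	if (names == NULL)
		return;
	nb = rte_eth_xstats_get_names(port_id, names, nb);
	for (i = 0; i < nb; i++)
		printf("%s\n", names[i].name);
	rte_free(names);
}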
3176
3177 static int
3178 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3179                     unsigned n)
3180 {
3181         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3182         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3183         unsigned i, count, prio;
3184         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3185
3186         count = i40e_xstats_calc_num();
3187         if (n < count)
3188                 return count;
3189
3190         i40e_read_stats_registers(pf, hw);
3191
3192         if (xstats == NULL)
3193                 return 0;
3194
3195         count = 0;
3196
3197         /* Get stats from i40e_eth_stats struct */
3198         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3199                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3200                         rte_i40e_stats_strings[i].offset);
3201                 xstats[count].id = count;
3202                 count++;
3203         }
3204
3205         /* Get individual stats from the i40e_hw_port struct */
3206         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3207                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3208                         rte_i40e_hw_port_strings[i].offset);
3209                 xstats[count].id = count;
3210                 count++;
3211         }
3212
3213         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3214                 for (prio = 0; prio < 8; prio++) {
3215                         xstats[count].value =
3216                                 *(uint64_t *)(((char *)hw_stats) +
3217                                 rte_i40e_rxq_prio_strings[i].offset +
3218                                 (sizeof(uint64_t) * prio));
3219                         xstats[count].id = count;
3220                         count++;
3221                 }
3222         }
3223
3224         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3225                 for (prio = 0; prio < 8; prio++) {
3226                         xstats[count].value =
3227                                 *(uint64_t *)(((char *)hw_stats) +
3228                                 rte_i40e_txq_prio_strings[i].offset +
3229                                 (sizeof(uint64_t) * prio));
3230                         xstats[count].id = count;
3231                         count++;
3232                 }
3233         }
3234
3235         return count;
3236 }
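
/*
 * Hypothetical companion to the name listing above: fetch the values with
 * the same two-call sizing pattern; entry i of the value array corresponds
 * to entry i of the name array.
 */
static __rte_unused void
example_dump_xstat_values(uint16_t port_id)
{
	struct rte_eth_xstat *vals;
	int i, nb;

	nb = rte_eth_xstats_get(port_id, NULL, 0);
	if (nb <= 0)
		return;
	vals = rte_malloc(NULL, sizeof(*vals) * nb, 0);
	if (vals == NULL)
		return;
	nb = rte_eth_xstats_get(port_id, vals, nb);
	for (i = 0; i < nb; i++)
		printf("xstat[%"PRIu64"] = %"PRIu64"\n",
		       vals[i].id, vals[i].value);
	rte_free(vals);
}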
3237
3238 static int
3239 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3240                                  __rte_unused uint16_t queue_id,
3241                                  __rte_unused uint8_t stat_idx,
3242                                  __rte_unused uint8_t is_rx)
3243 {
3244         PMD_INIT_FUNC_TRACE();
3245
3246         return -ENOSYS;
3247 }
3248
3249 static int
3250 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3251 {
3252         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3253         u32 full_ver;
3254         u8 ver, patch;
3255         u16 build;
3256         int ret;
3257
3258         full_ver = hw->nvm.oem_ver;
3259         ver = (u8)(full_ver >> 24);
3260         build = (u16)((full_ver >> 8) & 0xffff);
3261         patch = (u8)(full_ver & 0xff);
3262
3263         ret = snprintf(fw_version, fw_size,
3264                  "%d.%d%d 0x%08x %d.%d.%d",
3265                  ((hw->nvm.version >> 12) & 0xf),
3266                  ((hw->nvm.version >> 4) & 0xff),
3267                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3268                  ver, build, patch);
3269
3270         ret += 1; /* add the size of '\0' */
3271         if (fw_size < (u32)ret)
3272                 return ret;
3273         else
3274                 return 0;
3275 }
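
/*
 * Illustrative usage (hypothetical snippet): a positive return value from
 * rte_eth_dev_fw_version_get() is the buffer size the full string would have
 * needed, matching the snprintf-based sizing above.
 */
static __rte_unused void
example_print_fw_version(uint16_t port_id)
{
	char ver[64];
	int ret = rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver));

	if (ret == 0)
		printf("port %u firmware: %s\n", port_id, ver);
	else if (ret > 0)
		printf("port %u: need a %d-byte buffer\n", port_id, ret);
}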
3276
3277 static void
3278 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3279 {
3280         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3281         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3282         struct i40e_vsi *vsi = pf->main_vsi;
3283         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3284
3285         dev_info->max_rx_queues = vsi->nb_qps;
3286         dev_info->max_tx_queues = vsi->nb_qps;
3287         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3288         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3289         dev_info->max_mac_addrs = vsi->max_macaddrs;
3290         dev_info->max_vfs = pci_dev->max_vfs;
3291         dev_info->rx_queue_offload_capa = 0;
3292         dev_info->rx_offload_capa =
3293                 DEV_RX_OFFLOAD_VLAN_STRIP |
3294                 DEV_RX_OFFLOAD_QINQ_STRIP |
3295                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3296                 DEV_RX_OFFLOAD_UDP_CKSUM |
3297                 DEV_RX_OFFLOAD_TCP_CKSUM |
3298                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3299                 DEV_RX_OFFLOAD_CRC_STRIP |
3300                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3301                 DEV_RX_OFFLOAD_VLAN_FILTER |
3302                 DEV_RX_OFFLOAD_JUMBO_FRAME;
3303
3304         dev_info->tx_queue_offload_capa = 0;
3305         dev_info->tx_offload_capa =
3306                 DEV_TX_OFFLOAD_VLAN_INSERT |
3307                 DEV_TX_OFFLOAD_QINQ_INSERT |
3308                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3309                 DEV_TX_OFFLOAD_UDP_CKSUM |
3310                 DEV_TX_OFFLOAD_TCP_CKSUM |
3311                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3312                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3313                 DEV_TX_OFFLOAD_TCP_TSO |
3314                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3315                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3316                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3317                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3318         dev_info->dev_capa =
3319                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3320                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3321
3322         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3323                                                 sizeof(uint32_t);
3324         dev_info->reta_size = pf->hash_lut_size;
3325         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3326
3327         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3328                 .rx_thresh = {
3329                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3330                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3331                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3332                 },
3333                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3334                 .rx_drop_en = 0,
3335                 .offloads = 0,
3336         };
3337
3338         dev_info->default_txconf = (struct rte_eth_txconf) {
3339                 .tx_thresh = {
3340                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3341                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3342                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3343                 },
3344                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3345                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3346                 .offloads = 0,
3347         };
3348
3349         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3350                 .nb_max = I40E_MAX_RING_DESC,
3351                 .nb_min = I40E_MIN_RING_DESC,
3352                 .nb_align = I40E_ALIGN_RING_DESC,
3353         };
3354
3355         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3356                 .nb_max = I40E_MAX_RING_DESC,
3357                 .nb_min = I40E_MIN_RING_DESC,
3358                 .nb_align = I40E_ALIGN_RING_DESC,
3359                 .nb_seg_max = I40E_TX_MAX_SEG,
3360                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3361         };
3362
3363         if (pf->flags & I40E_FLAG_VMDQ) {
3364                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3365                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3366                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3367                                                 pf->max_nb_vmdq_vsi;
3368                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3369                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3370                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3371         }
3372
3373         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3374                 /* For XL710 */
3375                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3376                 dev_info->default_rxportconf.nb_queues = 2;
3377                 dev_info->default_txportconf.nb_queues = 2;
3378                 if (dev->data->nb_rx_queues == 1)
3379                         dev_info->default_rxportconf.ring_size = 2048;
3380                 else
3381                         dev_info->default_rxportconf.ring_size = 1024;
3382                 if (dev->data->nb_tx_queues == 1)
3383                         dev_info->default_txportconf.ring_size = 1024;
3384                 else
3385                         dev_info->default_txportconf.ring_size = 512;
3386
3387         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3388                 /* For XXV710 */
3389                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3390                 dev_info->default_rxportconf.nb_queues = 1;
3391                 dev_info->default_txportconf.nb_queues = 1;
3392                 dev_info->default_rxportconf.ring_size = 256;
3393                 dev_info->default_txportconf.ring_size = 256;
3394         } else {
3395                 /* For X710 */
3396                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3397                 dev_info->default_rxportconf.nb_queues = 1;
3398                 dev_info->default_txportconf.nb_queues = 1;
3399                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3400                         dev_info->default_rxportconf.ring_size = 512;
3401                         dev_info->default_txportconf.ring_size = 256;
3402                 } else {
3403                         dev_info->default_rxportconf.ring_size = 256;
3404                         dev_info->default_txportconf.ring_size = 256;
3405                 }
3406         }
3407         dev_info->default_rxportconf.burst_size = 32;
3408         dev_info->default_txportconf.burst_size = 32;
3409 }
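
/*
 * Illustrative capability check from an application (hypothetical snippet):
 * query the limits and offload bits filled in above before configuring the
 * port.
 */
static __rte_unused bool
example_port_supports_tso(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	memset(&info, 0, sizeof(info));
	rte_eth_dev_info_get(port_id, &info);
	return (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0;
}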
3410
3411 static int
3412 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3413 {
3414         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3415         struct i40e_vsi *vsi = pf->main_vsi;
3416         PMD_INIT_FUNC_TRACE();
3417
3418         if (on)
3419                 return i40e_vsi_add_vlan(vsi, vlan_id);
3420         else
3421                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3422 }
3423
3424 static int
3425 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3426                                 enum rte_vlan_type vlan_type,
3427                                 uint16_t tpid, int qinq)
3428 {
3429         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3430         uint64_t reg_r = 0;
3431         uint64_t reg_w = 0;
3432         uint16_t reg_id = 3;
3433         int ret;
3434
3435         if (qinq) {
3436                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3437                         reg_id = 2;
3438         }
3439
3440         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3441                                           &reg_r, NULL);
3442         if (ret != I40E_SUCCESS) {
3443                 PMD_DRV_LOG(ERR,
3444                            "Failed to debug-read I40E_GL_SWT_L2TAGCTRL[%d]",
3445                            reg_id);
3446                 return -EIO;
3447         }
3448         PMD_DRV_LOG(DEBUG,
3449                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3450                     reg_id, reg_r);
3451
3452         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3453         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3454         if (reg_r == reg_w) {
3455                 PMD_DRV_LOG(DEBUG, "No need to write");
3456                 return 0;
3457         }
3458
3459         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3460                                            reg_w, NULL);
3461         if (ret != I40E_SUCCESS) {
3462                 PMD_DRV_LOG(ERR,
3463                             "Failed to debug-write I40E_GL_SWT_L2TAGCTRL[%d]",
3464                             reg_id);
3465                 return -EIO;
3466         }
3467         PMD_DRV_LOG(DEBUG,
3468                     "Global register 0x%08x has been changed to value 0x%08x",
3469                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3470
3471         return 0;
3472 }
3473
3474 static int
3475 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3476                    enum rte_vlan_type vlan_type,
3477                    uint16_t tpid)
3478 {
3479         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3480         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3481         int qinq = dev->data->dev_conf.rxmode.offloads &
3482                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3483         int ret = 0;
3484
3485         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3486              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3487             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3488                 PMD_DRV_LOG(ERR,
3489                             "Unsupported vlan type.");
3490                 return -EINVAL;
3491         }
3492
3493         if (pf->support_multi_driver) {
3494                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3495                 return -ENOTSUP;
3496         }
3497
3498         /* Support for 802.1ad frames was added in NVM API 1.7 */
3499         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3500                 if (qinq) {
3501                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3502                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3503                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3504                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3505                 } else {
3506                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3507                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3508                 }
3509                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3510                 if (ret != I40E_SUCCESS) {
3511                         PMD_DRV_LOG(ERR,
3512                                     "Set switch config failed aq_err: %d",
3513                                     hw->aq.asq_last_status);
3514                         ret = -EIO;
3515                 }
3516         } else
3517                 /* If NVM API < 1.7, keep the register setting */
3518                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3519                                                       tpid, qinq);
3520         i40e_global_cfg_warning(I40E_WARNING_TPID);
3521
3522         return ret;
3523 }
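
/*
 * Illustrative application call (hypothetical snippet): with QinQ enabled,
 * set the outer TPID to the 802.1ad ethertype (0x88A8).
 */
static __rte_unused int
example_set_outer_tpid_8021ad(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
					       0x88A8);
}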
3524
3525 static int
3526 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3527 {
3528         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3529         struct i40e_vsi *vsi = pf->main_vsi;
3530         struct rte_eth_rxmode *rxmode;
3531
3532         rxmode = &dev->data->dev_conf.rxmode;
3533         if (mask & ETH_VLAN_FILTER_MASK) {
3534                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3535                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3536                 else
3537                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3538         }
3539
3540         if (mask & ETH_VLAN_STRIP_MASK) {
3541                 /* Enable or disable VLAN stripping */
3542                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3543                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3544                 else
3545                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3546         }
3547
3548         if (mask & ETH_VLAN_EXTEND_MASK) {
3549                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
3550                         i40e_vsi_config_double_vlan(vsi, TRUE);
3551                         /* Set global registers with default ethertype. */
3552                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3553                                            ETHER_TYPE_VLAN);
3554                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3555                                            ETHER_TYPE_VLAN);
3556                 } else
3558                         i40e_vsi_config_double_vlan(vsi, FALSE);
3559         }
3560
3561         return 0;
3562 }
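
/*
 * Illustrative application-side toggle (hypothetical snippet): the mask bits
 * handled above arrive through rte_eth_dev_set_vlan_offload(), which only
 * passes along the bits that actually changed.
 */
static __rte_unused int
example_enable_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;
	return rte_eth_dev_set_vlan_offload(port_id,
					    mask | ETH_VLAN_STRIP_OFFLOAD);
}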
3563
3564 static void
3565 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3566                           __rte_unused uint16_t queue,
3567                           __rte_unused int on)
3568 {
3569         PMD_INIT_FUNC_TRACE();
3570 }
3571
3572 static int
3573 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3574 {
3575         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3576         struct i40e_vsi *vsi = pf->main_vsi;
3577         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3578         struct i40e_vsi_vlan_pvid_info info;
3579
3580         memset(&info, 0, sizeof(info));
3581         info.on = on;
3582         if (info.on) {
3583                 info.config.pvid = pvid;
3584         } else {
3585                 info.config.reject.tagged =
3586                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3587                 info.config.reject.untagged =
3588                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3589         }
3590
3591         return i40e_vsi_vlan_pvid_set(vsi, &info);
3592 }
3593
3594 static int
3595 i40e_dev_led_on(struct rte_eth_dev *dev)
3596 {
3597         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3598         uint32_t mode = i40e_led_get(hw);
3599
3600         if (mode == 0)
3601                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3602
3603         return 0;
3604 }
3605
3606 static int
3607 i40e_dev_led_off(struct rte_eth_dev *dev)
3608 {
3609         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3610         uint32_t mode = i40e_led_get(hw);
3611
3612         if (mode != 0)
3613                 i40e_led_set(hw, 0, false);
3614
3615         return 0;
3616 }
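
/*
 * Illustrative port-identification helper (hypothetical snippet) built on
 * the two callbacks above via rte_eth_led_on()/rte_eth_led_off():
 */
static __rte_unused void
example_blink_port(uint16_t port_id, int times)
{
	while (times--) {
		rte_eth_led_on(port_id);
		rte_delay_ms(500);
		rte_eth_led_off(port_id);
		rte_delay_ms(500);
	}
}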
3617
3618 static int
3619 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3620 {
3621         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3622         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3623
3624         fc_conf->pause_time = pf->fc_conf.pause_time;
3625
3626         /* read out from register, in case they are modified by other port */
3627         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3628                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3629         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3630                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3631
3632         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3633         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3634
3635         /* Return the current mode according to the actual setting */
3636         switch (hw->fc.current_mode) {
3637         case I40E_FC_FULL:
3638                 fc_conf->mode = RTE_FC_FULL;
3639                 break;
3640         case I40E_FC_TX_PAUSE:
3641                 fc_conf->mode = RTE_FC_TX_PAUSE;
3642                 break;
3643         case I40E_FC_RX_PAUSE:
3644                 fc_conf->mode = RTE_FC_RX_PAUSE;
3645                 break;
3646         case I40E_FC_NONE:
3647         default:
3648                 fc_conf->mode = RTE_FC_NONE;
3649         }
3650
3651         return 0;
3652 }
3653
3654 static int
3655 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3656 {
3657         uint32_t mflcn_reg, fctrl_reg, reg;
3658         uint32_t max_high_water;
3659         uint8_t i, aq_failure;
3660         int err;
3661         struct i40e_hw *hw;
3662         struct i40e_pf *pf;
3663         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3664                 [RTE_FC_NONE] = I40E_FC_NONE,
3665                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3666                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3667                 [RTE_FC_FULL] = I40E_FC_FULL
3668         };
3669
3670         /* The high_water field in rte_eth_fc_conf is in kilobytes */
3671
3672         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3673         if ((fc_conf->high_water > max_high_water) ||
3674                         (fc_conf->high_water < fc_conf->low_water)) {
3675                 PMD_INIT_LOG(ERR,
3676                         "Invalid high/low water setup value in KB, high_water must be <= %d.",
3677                         max_high_water);
3678                 return -EINVAL;
3679         }
3680
3681         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3682         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3683         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3684
3685         pf->fc_conf.pause_time = fc_conf->pause_time;
3686         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3687         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3688
3689         PMD_INIT_FUNC_TRACE();
3690
3691         /* All the link-flow-control-related enable/disable register
3692          * configuration is handled by the firmware
3693          */
3694         err = i40e_set_fc(hw, &aq_failure, true);
3695         if (err < 0)
3696                 return -ENOSYS;
3697
3698         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3699                 /* Configure flow control refresh threshold,
3700                  * the value for stat_tx_pause_refresh_timer[8]
3701                  * is used for global pause operation.
3702                  */
3703
3704                 I40E_WRITE_REG(hw,
3705                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3706                                pf->fc_conf.pause_time);
3707
3708                 /* configure the timer value included in transmitted pause
3709                  * frame,
3710                  * the value for stat_tx_pause_quanta[8] is used for global
3711                  * pause operation
3712                  */
3713                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3714                                pf->fc_conf.pause_time);
3715
3716                 fctrl_reg = I40E_READ_REG(hw,
3717                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3718
3719                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3720                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3721                 else
3722                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3723
3724                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3725                                fctrl_reg);
3726         } else {
3727                 /* Configure pause time (2 TCs per register) */
3728                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3729                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3730                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3731
3732                 /* Configure flow control refresh threshold value */
3733                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3734                                pf->fc_conf.pause_time / 2);
3735
3736                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3737
3738                 /* Set or clear the MFLCN.PMCF and MFLCN.DPF bits
3739                  * depending on the configuration
3740                  */
3741                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3742                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3743                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3744                 } else {
3745                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3746                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3747                 }
3748
3749                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3750         }
3751
3752         if (!pf->support_multi_driver) {
3753                 /* Configure water marks based on both packet count and byte count */
3754                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3755                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3756                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3757                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3758                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3759                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3760                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3761                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3762                                   << I40E_KILOSHIFT);
3763                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3764                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3765                                    << I40E_KILOSHIFT);
3766                 i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
3767         } else {
3768                 PMD_DRV_LOG(ERR,
3769                             "Water mark configuration is not supported in multi-driver mode.");
3770         }
3771
3772         I40E_WRITE_FLUSH(hw);
3773
3774         return 0;
3775 }
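
/*
 * Illustrative usage sketch (not part of the driver): an application
 * configures link flow control through the generic ethdev API, which
 * dispatches to i40e_flow_ctrl_set() above. The port id and the
 * water-mark values below are assumptions.
 *
 *   struct rte_eth_fc_conf fc_conf = {
 *           .mode = RTE_FC_FULL,
 *           .high_water = 128,    // KB; must be <= RXPB size, >= low_water
 *           .low_water = 64,      // KB
 *           .pause_time = 0xFFFF,
 *           .mac_ctrl_frame_fwd = 0,
 *   };
 *   int ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */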
3776
3777 static int
3778 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3779                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3780 {
3781         PMD_INIT_FUNC_TRACE();
3782
3783         return -ENOSYS;
3784 }
3785
3786 /* Add a MAC address, and update filters */
3787 static int
3788 i40e_macaddr_add(struct rte_eth_dev *dev,
3789                  struct ether_addr *mac_addr,
3790                  __rte_unused uint32_t index,
3791                  uint32_t pool)
3792 {
3793         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3794         struct i40e_mac_filter_info mac_filter;
3795         struct i40e_vsi *vsi;
3796         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
3797         int ret;
3798
3799         /* If VMDQ not enabled or configured, return */
3800         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3801                           !pf->nb_cfg_vmdq_vsi)) {
3802                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3803                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3804                         pool);
3805                 return -ENOTSUP;
3806         }
3807
3808         if (pool > pf->nb_cfg_vmdq_vsi) {
3809                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3810                                 pool, pf->nb_cfg_vmdq_vsi);
3811                 return -EINVAL;
3812         }
3813
3814         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3815         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3816                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3817         else
3818                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3819
3820         if (pool == 0)
3821                 vsi = pf->main_vsi;
3822         else
3823                 vsi = pf->vmdq[pool - 1].vsi;
3824
3825         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3826         if (ret != I40E_SUCCESS) {
3827                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3828                 return -ENODEV;
3829         }
3830         return 0;
3831 }
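
/*
 * Illustrative usage sketch (values are assumptions): an application adds
 * a MAC address through the ethdev API, which lands in i40e_macaddr_add()
 * above. Pool 0 targets the main VSI; pool N (N > 0) targets VMDq
 * VSI N - 1.
 *
 *   struct ether_addr mac = {
 *           .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *   int ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */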
3832
3833 /* Remove a MAC address, and update filters */
3834 static void
3835 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3836 {
3837         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3838         struct i40e_vsi *vsi;
3839         struct rte_eth_dev_data *data = dev->data;
3840         struct ether_addr *macaddr;
3841         int ret;
3842         uint32_t i;
3843         uint64_t pool_sel;
3844
3845         macaddr = &(data->mac_addrs[index]);
3846
3847         pool_sel = dev->data->mac_pool_sel[index];
3848
3849         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3850                 if (pool_sel & (1ULL << i)) {
3851                         if (i == 0)
3852                                 vsi = pf->main_vsi;
3853                         else {
3854                                 /* No VMDQ pool enabled or configured */
3855                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3856                                         (i > pf->nb_cfg_vmdq_vsi)) {
3857                                         PMD_DRV_LOG(ERR,
3858                                                 "No VMDQ pool enabled/configured");
3859                                         return;
3860                                 }
3861                                 vsi = pf->vmdq[i - 1].vsi;
3862                         }
3863                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3864
3865                         if (ret) {
3866                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3867                                 return;
3868                         }
3869                 }
3870         }
3871 }
3872
3873 /* Set perfect match or hash match of MAC and VLAN for a VF */
3874 static int
3875 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3876                  struct rte_eth_mac_filter *filter,
3877                  bool add)
3878 {
3879         struct i40e_hw *hw;
3880         struct i40e_mac_filter_info mac_filter;
3881         struct ether_addr old_mac;
3882         struct ether_addr *new_mac;
3883         struct i40e_pf_vf *vf = NULL;
3884         uint16_t vf_id;
3885         int ret;
3886
3887         if (pf == NULL) {
3888                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3889                 return -EINVAL;
3890         }
3891         hw = I40E_PF_TO_HW(pf);
3892
3893         if (filter == NULL) {
3894                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3895                 return -EINVAL;
3896         }
3897
3898         new_mac = &filter->mac_addr;
3899
3900         if (is_zero_ether_addr(new_mac)) {
3901                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3902                 return -EINVAL;
3903         }
3904
3905         vf_id = filter->dst_id;
3906
3907         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3908                 PMD_DRV_LOG(ERR, "Invalid argument.");
3909                 return -EINVAL;
3910         }
3911         vf = &pf->vfs[vf_id];
3912
3913         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3914                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3915                 return -EINVAL;
3916         }
3917
3918         if (add) {
3919                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3920                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3921                                 ETHER_ADDR_LEN);
3922                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3923                                  ETHER_ADDR_LEN);
3924
3925                 mac_filter.filter_type = filter->filter_type;
3926                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3927                 if (ret != I40E_SUCCESS) {
3928                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3929                         return -1;
3930                 }
3931                 ether_addr_copy(new_mac, &pf->dev_addr);
3932         } else {
3933                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3934                                 ETHER_ADDR_LEN);
3935                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3936                 if (ret != I40E_SUCCESS) {
3937                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3938                         return -1;
3939                 }
3940
3941                 /* Clear device address as it has been removed */
3942                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3943                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3944         }
3945
3946         return 0;
3947 }
3948
3949 /* MAC filter handle */
3950 static int
3951 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3952                 void *arg)
3953 {
3954         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3955         struct rte_eth_mac_filter *filter;
3956         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3957         int ret = I40E_NOT_SUPPORTED;
3958
3959         filter = (struct rte_eth_mac_filter *)(arg);
3960
3961         switch (filter_op) {
3962         case RTE_ETH_FILTER_NOP:
3963                 ret = I40E_SUCCESS;
3964                 break;
3965         case RTE_ETH_FILTER_ADD:
3966                 i40e_pf_disable_irq0(hw);
3967                 if (filter->is_vf)
3968                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3969                 i40e_pf_enable_irq0(hw);
3970                 break;
3971         case RTE_ETH_FILTER_DELETE:
3972                 i40e_pf_disable_irq0(hw);
3973                 if (filter->is_vf)
3974                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3975                 i40e_pf_enable_irq0(hw);
3976                 break;
3977         default:
3978                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3979                 ret = I40E_ERR_PARAM;
3980                 break;
3981         }
3982
3983         return ret;
3984 }
3985
3986 static int
3987 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3988 {
3989         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3990         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3991         uint32_t reg;
3992         int ret;
3993
3994         if (!lut)
3995                 return -EINVAL;
3996
3997         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3998                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3999                                           lut, lut_size);
4000                 if (ret) {
4001                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4002                         return ret;
4003                 }
4004         } else {
4005                 uint32_t *lut_dw = (uint32_t *)lut;
4006                 uint16_t i, lut_size_dw = lut_size / 4;
4007
4008                 if (vsi->type == I40E_VSI_SRIOV) {
4009                         for (i = 0; i < lut_size_dw; i++) {
4010                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4011                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4012                         }
4013                 } else {
4014                         for (i = 0; i < lut_size_dw; i++)
4015                                 lut_dw[i] = I40E_READ_REG(hw,
4016                                                           I40E_PFQF_HLUT(i));
4017                 }
4018         }
4019
4020         return 0;
4021 }
4022
4023 int
4024 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4025 {
4026         struct i40e_pf *pf;
4027         struct i40e_hw *hw;
4028         int ret;
4029
4030         if (!vsi || !lut)
4031                 return -EINVAL;
4032
4033         pf = I40E_VSI_TO_PF(vsi);
4034         hw = I40E_VSI_TO_HW(vsi);
4035
4036         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4037                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
4038                                           lut, lut_size);
4039                 if (ret) {
4040                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4041                         return ret;
4042                 }
4043         } else {
4044                 uint32_t *lut_dw = (uint32_t *)lut;
4045                 uint16_t i, lut_size_dw = lut_size / 4;
4046
4047                 if (vsi->type == I40E_VSI_SRIOV) {
4048                         for (i = 0; i < lut_size_dw; i++)
4049                                 I40E_WRITE_REG(
4050                                         hw,
4051                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4052                                         lut_dw[i]);
4053                 } else {
4054                         for (i = 0; i < lut_size_dw; i++)
4055                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4056                                                lut_dw[i]);
4057                 }
4058                 I40E_WRITE_FLUSH(hw);
4059         }
4060
4061         return 0;
4062 }
4063
4064 static int
4065 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4066                          struct rte_eth_rss_reta_entry64 *reta_conf,
4067                          uint16_t reta_size)
4068 {
4069         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4070         uint16_t i, lut_size = pf->hash_lut_size;
4071         uint16_t idx, shift;
4072         uint8_t *lut;
4073         int ret;
4074
4075         if (reta_size != lut_size ||
4076                 reta_size > ETH_RSS_RETA_SIZE_512) {
4077                 PMD_DRV_LOG(ERR,
4078                         "The size of the configured hash lookup table (%d) doesn't match what the hardware can support (%d)",
4079                         reta_size, lut_size);
4080                 return -EINVAL;
4081         }
4082
4083         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4084         if (!lut) {
4085                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4086                 return -ENOMEM;
4087         }
4088         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4089         if (ret)
4090                 goto out;
4091         for (i = 0; i < reta_size; i++) {
4092                 idx = i / RTE_RETA_GROUP_SIZE;
4093                 shift = i % RTE_RETA_GROUP_SIZE;
4094                 if (reta_conf[idx].mask & (1ULL << shift))
4095                         lut[i] = reta_conf[idx].reta[shift];
4096         }
4097         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4098
4099 out:
4100         rte_free(lut);
4101
4102         return ret;
4103 }
4104
4105 static int
4106 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4107                         struct rte_eth_rss_reta_entry64 *reta_conf,
4108                         uint16_t reta_size)
4109 {
4110         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4111         uint16_t i, lut_size = pf->hash_lut_size;
4112         uint16_t idx, shift;
4113         uint8_t *lut;
4114         int ret;
4115
4116         if (reta_size != lut_size ||
4117                 reta_size > ETH_RSS_RETA_SIZE_512) {
4118                 PMD_DRV_LOG(ERR,
4119                         "The size of the configured hash lookup table (%d) doesn't match what the hardware can support (%d)",
4120                         reta_size, lut_size);
4121                 return -EINVAL;
4122         }
4123
4124         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4125         if (!lut) {
4126                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4127                 return -ENOMEM;
4128         }
4129
4130         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4131         if (ret)
4132                 goto out;
4133         for (i = 0; i < reta_size; i++) {
4134                 idx = i / RTE_RETA_GROUP_SIZE;
4135                 shift = i % RTE_RETA_GROUP_SIZE;
4136                 if (reta_conf[idx].mask & (1ULL << shift))
4137                         reta_conf[idx].reta[shift] = lut[i];
4138         }
4139
4140 out:
4141         rte_free(lut);
4142
4143         return ret;
4144 }
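
/*
 * Illustrative usage sketch: populating the RSS redirection table from an
 * application. reta_size must equal the PF's hash LUT size; the 512-entry
 * table and nb_rx_queues below are assumptions.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
 *   uint16_t i, idx, shift;
 *
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < 512; i++) {
 *           idx = i / RTE_RETA_GROUP_SIZE;
 *           shift = i % RTE_RETA_GROUP_SIZE;
 *           reta_conf[idx].mask |= 1ULL << shift;
 *           reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *   }
 *   ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
 */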
4145
4146 /**
4147  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4148  * @hw:   pointer to the HW structure
4149  * @mem:  pointer to mem struct to fill out
4150  * @size: size of memory requested
4151  * @alignment: what to align the allocation to
4152  **/
4153 enum i40e_status_code
4154 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4155                         struct i40e_dma_mem *mem,
4156                         u64 size,
4157                         u32 alignment)
4158 {
4159         const struct rte_memzone *mz = NULL;
4160         char z_name[RTE_MEMZONE_NAMESIZE];
4161
4162         if (!mem)
4163                 return I40E_ERR_PARAM;
4164
4165         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4166         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4167                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4168         if (!mz)
4169                 return I40E_ERR_NO_MEMORY;
4170
4171         mem->size = size;
4172         mem->va = mz->addr;
4173         mem->pa = mz->iova;
4174         mem->zone = (const void *)mz;
4175         PMD_DRV_LOG(DEBUG,
4176                 "memzone %s allocated with physical address: %"PRIu64,
4177                 mz->name, mem->pa);
4178
4179         return I40E_SUCCESS;
4180 }
4181
4182 /**
4183  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4184  * @hw:   pointer to the HW structure
4185  * @mem:  ptr to mem struct to free
4186  **/
4187 enum i40e_status_code
4188 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4189                     struct i40e_dma_mem *mem)
4190 {
4191         if (!mem)
4192                 return I40E_ERR_PARAM;
4193
4194         PMD_DRV_LOG(DEBUG,
4195                 "memzone %s to be freed with physical address: %"PRIu64,
4196                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4197         rte_memzone_free((const struct rte_memzone *)mem->zone);
4198         mem->zone = NULL;
4199         mem->va = NULL;
4200         mem->pa = (u64)0;
4201
4202         return I40E_SUCCESS;
4203 }
4204
4205 /**
4206  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4207  * @hw:   pointer to the HW structure
4208  * @mem:  pointer to mem struct to fill out
4209  * @size: size of memory requested
4210  **/
4211 enum i40e_status_code
4212 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4213                          struct i40e_virt_mem *mem,
4214                          u32 size)
4215 {
4216         if (!mem)
4217                 return I40E_ERR_PARAM;
4218
4219         mem->size = size;
4220         mem->va = rte_zmalloc("i40e", size, 0);
4221
4222         if (mem->va)
4223                 return I40E_SUCCESS;
4224         else
4225                 return I40E_ERR_NO_MEMORY;
4226 }
4227
4228 /**
4229  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4230  * @hw:   pointer to the HW structure
4231  * @mem:  pointer to mem struct to free
4232  **/
4233 enum i40e_status_code
4234 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4235                      struct i40e_virt_mem *mem)
4236 {
4237         if (!mem)
4238                 return I40E_ERR_PARAM;
4239
4240         rte_free(mem->va);
4241         mem->va = NULL;
4242
4243         return I40E_SUCCESS;
4244 }
4245
4246 void
4247 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4248 {
4249         rte_spinlock_init(&sp->spinlock);
4250 }
4251
4252 void
4253 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4254 {
4255         rte_spinlock_lock(&sp->spinlock);
4256 }
4257
4258 void
4259 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4260 {
4261         rte_spinlock_unlock(&sp->spinlock);
4262 }
4263
4264 void
4265 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4266 {
4267         return;
4268 }
4269
4270 /**
4271  * Get the hardware capabilities, which will be parsed
4272  * and saved into struct i40e_hw.
4273  */
4274 static int
4275 i40e_get_cap(struct i40e_hw *hw)
4276 {
4277         struct i40e_aqc_list_capabilities_element_resp *buf;
4278         uint16_t len, size = 0;
4279         int ret;
4280
4281         /* Calculate a buffer size large enough to temporarily hold the response data */
4282         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4283                                                 I40E_MAX_CAP_ELE_NUM;
4284         buf = rte_zmalloc("i40e", len, 0);
4285         if (!buf) {
4286                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4287                 return I40E_ERR_NO_MEMORY;
4288         }
4289
4290         /* Get and parse the capabilities, then save them to hw */
4291         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4292                         i40e_aqc_opc_list_func_capabilities, NULL);
4293         if (ret != I40E_SUCCESS)
4294                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4295
4296         /* Free the temporary buffer after being used */
4297         rte_free(buf);
4298
4299         return ret;
4300 }
4301
4302 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4303 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
4304
4305 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4306                 const char *value,
4307                 void *opaque)
4308 {
4309         struct i40e_pf *pf;
4310         unsigned long num;
4311         char *end;
4312
4313         pf = (struct i40e_pf *)opaque;
4314         RTE_SET_USED(key);
4315
4316         errno = 0;
4317         num = strtoul(value, &end, 0);
4318         if (errno != 0 || end == value || *end != 0) {
4319                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4320                             "the current value = %hu", value, pf->vf_nb_qp_max);
4321                 return -(EINVAL);
4322         }
4323
4324         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4325                 pf->vf_nb_qp_max = (uint16_t)num;
4326         else
4327                 /* Return 0 so a later valid occurrence of this argument still works */
4328                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu; it must be "
4329                             "a power of 2 and no greater than 16. Keeping "
4330                             "the current value = %hu", num, pf->vf_nb_qp_max);
4331
4332         return 0;
4333 }
4334
4335 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4336 {
4337         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4338         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4339         struct rte_kvargs *kvlist;
4340
4341         /* Set the default number of queue pairs per VF to 4 */
4342         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4343
4344         if (dev->device->devargs == NULL)
4345                 return 0;
4346
4347         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4348         if (kvlist == NULL)
4349                 return -(EINVAL);
4350
4351         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4352                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
4353                             "the first invalid or the last valid one is used",
4354                             QUEUE_NUM_PER_VF_ARG);
4355
4356         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4357                            i40e_pf_parse_vf_queue_number_handler, pf);
4358
4359         rte_kvargs_free(kvlist);
4360
4361         return 0;
4362 }
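
/*
 * Example devargs usage (illustrative): request 8 queue pairs per VF when
 * whitelisting the device on the EAL command line. The PCI address is an
 * assumption.
 *
 *   ./testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i
 *
 * A value that is not a power of 2, or exceeds I40E_MAX_QP_NUM_PER_VF, is
 * rejected and the default of 4 queue pairs is kept.
 */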
4363
4364 static int
4365 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4366 {
4367         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4368         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4369         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4370         uint16_t qp_count = 0, vsi_count = 0;
4371
4372         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4373                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4374                 return -EINVAL;
4375         }
4376
4377         i40e_pf_config_vf_rxq_number(dev);
4378
4379         /* Add the parameter init for LFC */
4380         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4381         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4382         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4383
4384         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4385         pf->max_num_vsi = hw->func_caps.num_vsis;
4386         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4387         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4388
4389         /* FDir queue/VSI allocation */
4390         pf->fdir_qp_offset = 0;
4391         if (hw->func_caps.fd) {
4392                 pf->flags |= I40E_FLAG_FDIR;
4393                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4394         } else {
4395                 pf->fdir_nb_qps = 0;
4396         }
4397         qp_count += pf->fdir_nb_qps;
4398         vsi_count += 1;
4399
4400         /* LAN queue/VSI allocation */
4401         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4402         if (!hw->func_caps.rss) {
4403                 pf->lan_nb_qps = 1;
4404         } else {
4405                 pf->flags |= I40E_FLAG_RSS;
4406                 if (hw->mac.type == I40E_MAC_X722)
4407                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4408                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4409         }
4410         qp_count += pf->lan_nb_qps;
4411         vsi_count += 1;
4412
4413         /* VF queue/VSI allocation */
4414         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4415         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4416                 pf->flags |= I40E_FLAG_SRIOV;
4417                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4418                 pf->vf_num = pci_dev->max_vfs;
4419                 PMD_DRV_LOG(DEBUG,
4420                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4421                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4422         } else {
4423                 pf->vf_nb_qps = 0;
4424                 pf->vf_num = 0;
4425         }
4426         qp_count += pf->vf_nb_qps * pf->vf_num;
4427         vsi_count += pf->vf_num;
4428
4429         /* VMDq queue/VSI allocation */
4430         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4431         pf->vmdq_nb_qps = 0;
4432         pf->max_nb_vmdq_vsi = 0;
4433         if (hw->func_caps.vmdq) {
4434                 if (qp_count < hw->func_caps.num_tx_qp &&
4435                         vsi_count < hw->func_caps.num_vsis) {
4436                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4437                                 qp_count) / pf->vmdq_nb_qp_max;
4438
4439                         /* Limit the maximum number of VMDq VSIs to the
4440                          * maximum the ethdev layer can support
4441                          */
4442                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4443                                 hw->func_caps.num_vsis - vsi_count);
4444                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4445                                 ETH_64_POOLS);
4446                         if (pf->max_nb_vmdq_vsi) {
4447                                 pf->flags |= I40E_FLAG_VMDQ;
4448                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4449                                 PMD_DRV_LOG(DEBUG,
4450                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4451                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4452                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4453                         } else {
4454                                 PMD_DRV_LOG(INFO,
4455                                         "Not enough queues left for VMDq");
4456                         }
4457                 } else {
4458                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4459                 }
4460         }
4461         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4462         vsi_count += pf->max_nb_vmdq_vsi;
4463
4464         if (hw->func_caps.dcb)
4465                 pf->flags |= I40E_FLAG_DCB;
4466
4467         if (qp_count > hw->func_caps.num_tx_qp) {
4468                 PMD_DRV_LOG(ERR,
4469                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4470                         qp_count, hw->func_caps.num_tx_qp);
4471                 return -EINVAL;
4472         }
4473         if (vsi_count > hw->func_caps.num_vsis) {
4474                 PMD_DRV_LOG(ERR,
4475                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4476                         vsi_count, hw->func_caps.num_vsis);
4477                 return -EINVAL;
4478         }
4479
4480         return 0;
4481 }
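
/*
 * The resulting queue-pair layout is a contiguous split of the PF's
 * queues (actual sizes depend on capabilities and devargs):
 *
 *   [ FDir qps | LAN qps | VF0..VFn-1 qps | VMDq0..VMDqm-1 qps ]
 *     ^ fdir_qp_offset
 *                ^ lan_qp_offset  = fdir_qp_offset + fdir_nb_qps
 *                          ^ vf_qp_offset   = lan_qp_offset + lan_nb_qps
 *                                           ^ vmdq_qp_offset
 */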
4482
4483 static int
4484 i40e_pf_get_switch_config(struct i40e_pf *pf)
4485 {
4486         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4487         struct i40e_aqc_get_switch_config_resp *switch_config;
4488         struct i40e_aqc_switch_config_element_resp *element;
4489         uint16_t start_seid = 0, num_reported;
4490         int ret;
4491
4492         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4493                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4494         if (!switch_config) {
4495                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4496                 return -ENOMEM;
4497         }
4498
4499         /* Get the switch configurations */
4500         ret = i40e_aq_get_switch_config(hw, switch_config,
4501                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4502         if (ret != I40E_SUCCESS) {
4503                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4504                 goto fail;
4505         }
4506         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4507         if (num_reported != 1) { /* The number should be 1 */
4508                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4509                 goto fail;
4510         }
4511
4512         /* Parse the switch configuration elements */
4513         element = &(switch_config->element[0]);
4514         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4515                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4516                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4517         } else
4518                 PMD_DRV_LOG(INFO, "Unknown element type");
4519
4520 fail:
4521         rte_free(switch_config);
4522
4523         return ret;
4524 }
4525
4526 static int
4527 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4528                         uint32_t num)
4529 {
4530         struct pool_entry *entry;
4531
4532         if (pool == NULL || num == 0)
4533                 return -EINVAL;
4534
4535         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4536         if (entry == NULL) {
4537                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4538                 return -ENOMEM;
4539         }
4540
4541         /* Initialize the queue heap */
4542         pool->num_free = num;
4543         pool->num_alloc = 0;
4544         pool->base = base;
4545         LIST_INIT(&pool->alloc_list);
4546         LIST_INIT(&pool->free_list);
4547
4548         /* Initialize the first element */
4549         entry->base = 0;
4550         entry->len = num;
4551
4552         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4553         return 0;
4554 }
4555
4556 static void
4557 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4558 {
4559         struct pool_entry *entry, *next_entry;
4560
4561         if (pool == NULL)
4562                 return;
4563
4564         for (entry = LIST_FIRST(&pool->alloc_list);
4565                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4566                         entry = next_entry) {
4567                 LIST_REMOVE(entry, next);
4568                 rte_free(entry);
4569         }
4570
4571         for (entry = LIST_FIRST(&pool->free_list);
4572                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4573                         entry = next_entry) {
4574                 LIST_REMOVE(entry, next);
4575                 rte_free(entry);
4576         }
4577
4578         pool->num_free = 0;
4579         pool->num_alloc = 0;
4580         pool->base = 0;
4581         LIST_INIT(&pool->alloc_list);
4582         LIST_INIT(&pool->free_list);
4583 }
4584
4585 static int
4586 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4587                        uint32_t base)
4588 {
4589         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4590         uint32_t pool_offset;
4591         int insert;
4592
4593         if (pool == NULL) {
4594                 PMD_DRV_LOG(ERR, "Invalid parameter");
4595                 return -EINVAL;
4596         }
4597
4598         pool_offset = base - pool->base;
4599         /* Lookup in alloc list */
4600         LIST_FOREACH(entry, &pool->alloc_list, next) {
4601                 if (entry->base == pool_offset) {
4602                         valid_entry = entry;
4603                         LIST_REMOVE(entry, next);
4604                         break;
4605                 }
4606         }
4607
4608         /* Not found, return */
4609         if (valid_entry == NULL) {
4610                 PMD_DRV_LOG(ERR, "Failed to find entry");
4611                 return -EINVAL;
4612         }
4613
4614         /**
4615          * Found it; move it to the free list and try to merge.
4616          * To make merging easier, the free list is always kept sorted by base.
4617          * Find the adjacent prev and next entries.
4618          */
4619         prev = next = NULL;
4620         LIST_FOREACH(entry, &pool->free_list, next) {
4621                 if (entry->base > valid_entry->base) {
4622                         next = entry;
4623                         break;
4624                 }
4625                 prev = entry;
4626         }
4627
4628         insert = 0;
4629         /* Try to merge with the next one */
4630         if (next != NULL) {
4631                 /* Merge with next one */
4632                 if (valid_entry->base + valid_entry->len == next->base) {
4633                         next->base = valid_entry->base;
4634                         next->len += valid_entry->len;
4635                         rte_free(valid_entry);
4636                         valid_entry = next;
4637                         insert = 1;
4638                 }
4639         }
4640
4641         if (prev != NULL) {
4642                 /* Merge with previous one */
4643                 if (prev->base + prev->len == valid_entry->base) {
4644                         prev->len += valid_entry->len;
4645                         /* If it merged with the next one, remove that node */
4646                         if (insert == 1) {
4647                                 LIST_REMOVE(valid_entry, next);
4648                                 rte_free(valid_entry);
4649                         } else {
4650                                 rte_free(valid_entry);
4651                                 insert = 1;
4652                         }
4653                 }
4654         }
4655
4656         /* Did not merge with any entry, insert */
4657         if (insert == 0) {
4658                 if (prev != NULL)
4659                         LIST_INSERT_AFTER(prev, valid_entry, next);
4660                 else if (next != NULL)
4661                         LIST_INSERT_BEFORE(next, valid_entry, next);
4662                 else /* It's empty list, insert to head */
4663                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4664         }
4665
4666         pool->num_free += valid_entry->len;
4667         pool->num_alloc -= valid_entry->len;
4668
4669         return 0;
4670 }
4671
4672 static int
4673 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4674                        uint16_t num)
4675 {
4676         struct pool_entry *entry, *valid_entry;
4677
4678         if (pool == NULL || num == 0) {
4679                 PMD_DRV_LOG(ERR, "Invalid parameter");
4680                 return -EINVAL;
4681         }
4682
4683         if (pool->num_free < num) {
4684                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4685                             num, pool->num_free);
4686                 return -ENOMEM;
4687         }
4688
4689         valid_entry = NULL;
4690         /* Look up in the free list and find the best-fit entry */
4691         LIST_FOREACH(entry, &pool->free_list, next) {
4692                 if (entry->len >= num) {
4693                         /* Find best one */
4694                         if (entry->len == num) {
4695                                 valid_entry = entry;
4696                                 break;
4697                         }
4698                         if (valid_entry == NULL || valid_entry->len > entry->len)
4699                                 valid_entry = entry;
4700                 }
4701         }
4702
4703         /* No entry satisfies the request, return */
4704         if (valid_entry == NULL) {
4705                 PMD_DRV_LOG(ERR, "No valid entry found");
4706                 return -ENOMEM;
4707         }
4708         /**
4709          * The entry has exactly the requested number of queues;
4710          * remove it from the free list.
4711          */
4712         if (valid_entry->len == num) {
4713                 LIST_REMOVE(valid_entry, next);
4714         } else {
4715                 /**
4716                  * The entry has more queues than requested;
4717                  * create a new entry for the alloc list and shrink
4718                  * the base and length of the free-list entry.
4719                  */
4720                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4721                 if (entry == NULL) {
4722                         PMD_DRV_LOG(ERR,
4723                                 "Failed to allocate memory for resource pool");
4724                         return -ENOMEM;
4725                 }
4726                 entry->base = valid_entry->base;
4727                 entry->len = num;
4728                 valid_entry->base += num;
4729                 valid_entry->len -= num;
4730                 valid_entry = entry;
4731         }
4732
4733         /* Insert it into alloc list, not sorted */
4734         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4735
4736         pool->num_free -= valid_entry->len;
4737         pool->num_alloc += valid_entry->len;
4738
4739         return valid_entry->base + pool->base;
4740 }
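
/*
 * Worked example of the best-fit allocator above, assuming a pool with
 * base = 32 and num = 16:
 *
 *   i40e_res_pool_alloc(pool, 4) -> 32, free list: { base 4, len 12 }
 *   i40e_res_pool_alloc(pool, 4) -> 36, free list: { base 8, len 8 }
 *   i40e_res_pool_free(pool, 32) -> free list: { base 0, len 4 },
 *                                              { base 8, len 8 }
 *   i40e_res_pool_alloc(pool, 8) -> 40 (exact fit of { base 8, len 8 })
 *
 * Freed ranges are coalesced with adjacent free entries when contiguous.
 */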
4741
4742 /**
4743  * bitmap_is_subset - Check whether src2 is a subset of src1
4744  **/
4745 static inline int
4746 bitmap_is_subset(uint8_t src1, uint8_t src2)
4747 {
4748         return !((src1 ^ src2) & src2);
4749 }
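
/*
 * E.g. bitmap_is_subset(0x07, 0x05) returns 1: (0x07 ^ 0x05) & 0x05 == 0,
 * so every bit of src2 is also set in src1. bitmap_is_subset(0x07, 0x09)
 * returns 0, since bit 3 is set in src2 but not in src1.
 */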
4750
4751 static enum i40e_status_code
4752 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4753 {
4754         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4755
4756         /* If DCB is not supported, only default TC is supported */
4757         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4758                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4759                 return I40E_NOT_SUPPORTED;
4760         }
4761
4762         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4763                 PMD_DRV_LOG(ERR,
4764                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4765                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4766                 return I40E_NOT_SUPPORTED;
4767         }
4768         return I40E_SUCCESS;
4769 }
4770
4771 int
4772 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4773                                 struct i40e_vsi_vlan_pvid_info *info)
4774 {
4775         struct i40e_hw *hw;
4776         struct i40e_vsi_context ctxt;
4777         uint8_t vlan_flags = 0;
4778         int ret;
4779
4780         if (vsi == NULL || info == NULL) {
4781                 PMD_DRV_LOG(ERR, "invalid parameters");
4782                 return I40E_ERR_PARAM;
4783         }
4784
4785         if (info->on) {
4786                 vsi->info.pvid = info->config.pvid;
4787                 /**
4788                  * If PVID insertion is enabled, only tagged packets are
4789                  * allowed to be sent out.
4790                  */
4791                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4792                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4793         } else {
4794                 vsi->info.pvid = 0;
4795                 if (info->config.reject.tagged == 0)
4796                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4797
4798                 if (info->config.reject.untagged == 0)
4799                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4800         }
4801         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4802                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4803         vsi->info.port_vlan_flags |= vlan_flags;
4804         vsi->info.valid_sections =
4805                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4806         memset(&ctxt, 0, sizeof(ctxt));
4807         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4808         ctxt.seid = vsi->seid;
4809
4810         hw = I40E_VSI_TO_HW(vsi);
4811         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4812         if (ret != I40E_SUCCESS)
4813                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4814
4815         return ret;
4816 }
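
/*
 * Illustrative usage sketch: an application typically reaches this helper
 * through the generic port VLAN call, with on = 1 and the PVID filled into
 * info->config.pvid. The VLAN id below is an assumption.
 *
 *   ret = rte_eth_dev_set_vlan_pvid(port_id, 100, 1);
 */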
4817
4818 static int
4819 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4820 {
4821         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4822         int i, ret;
4823         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4824
4825         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4826         if (ret != I40E_SUCCESS)
4827                 return ret;
4828
4829         if (!vsi->seid) {
4830                 PMD_DRV_LOG(ERR, "seid not valid");
4831                 return -EINVAL;
4832         }
4833
4834         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4835         tc_bw_data.tc_valid_bits = enabled_tcmap;
4836         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4837                 tc_bw_data.tc_bw_credits[i] =
4838                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4839
4840         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4841         if (ret != I40E_SUCCESS) {
4842                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4843                 return ret;
4844         }
4845
4846         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4847                                         sizeof(vsi->info.qs_handle));
4848         return I40E_SUCCESS;
4849 }
4850
4851 static enum i40e_status_code
4852 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4853                                  struct i40e_aqc_vsi_properties_data *info,
4854                                  uint8_t enabled_tcmap)
4855 {
4856         enum i40e_status_code ret;
4857         int i, total_tc = 0;
4858         uint16_t qpnum_per_tc, bsf, qp_idx;
4859
4860         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4861         if (ret != I40E_SUCCESS)
4862                 return ret;
4863
4864         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4865                 if (enabled_tcmap & (1 << i))
4866                         total_tc++;
4867         if (total_tc == 0)
4868                 total_tc = 1;
4869         vsi->enabled_tc = enabled_tcmap;
4870
4871         /* Number of queues per enabled TC */
4872         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4873         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4874         bsf = rte_bsf32(qpnum_per_tc);
4875
4876         /* Adjust the queue number to actual queues that can be applied */
4877         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4878                 vsi->nb_qps = qpnum_per_tc * total_tc;
4879
4880         /**
4881          * Configure TC and queue mapping parameters. For each enabled TC,
4882          * allocate qpnum_per_tc queues to it; a disabled TC is served by
4883          * the default queue.
4884          */
4885         qp_idx = 0;
4886         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4887                 if (vsi->enabled_tc & (1 << i)) {
4888                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4889                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4890                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4891                         qp_idx += qpnum_per_tc;
4892                 } else
4893                         info->tc_mapping[i] = 0;
4894         }
4895
4896         /* Associate queue number with VSI */
4897         if (vsi->type == I40E_VSI_SRIOV) {
4898                 info->mapping_flags |=
4899                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4900                 for (i = 0; i < vsi->nb_qps; i++)
4901                         info->queue_mapping[i] =
4902                                 rte_cpu_to_le_16(vsi->base_queue + i);
4903         } else {
4904                 info->mapping_flags |=
4905                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4906                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4907         }
4908         info->valid_sections |=
4909                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4910
4911         return I40E_SUCCESS;
4912 }
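
/*
 * Worked example of the mapping above (illustrative): with 16 queue pairs
 * and TC0/TC1 enabled, total_tc = 2, qpnum_per_tc = 8 and bsf = 3, since
 * the per-TC queue count is encoded as a power of two. TC0 gets queues
 * 0..7 and TC1 gets queues 8..15:
 *
 *   tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *   tc_mapping[1] = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 */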
4913
4914 static int
4915 i40e_veb_release(struct i40e_veb *veb)
4916 {
4917         struct i40e_vsi *vsi;
4918         struct i40e_hw *hw;
4919
4920         if (veb == NULL)
4921                 return -EINVAL;
4922
4923         if (!TAILQ_EMPTY(&veb->head)) {
4924                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4925                 return -EACCES;
4926         }
4927         /* associate_vsi field is NULL for floating VEB */
4928         if (veb->associate_vsi != NULL) {
4929                 vsi = veb->associate_vsi;
4930                 hw = I40E_VSI_TO_HW(vsi);
4931
4932                 vsi->uplink_seid = veb->uplink_seid;
4933                 vsi->veb = NULL;
4934         } else {
4935                 veb->associate_pf->main_vsi->floating_veb = NULL;
4936                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4937         }
4938
4939         i40e_aq_delete_element(hw, veb->seid, NULL);
4940         rte_free(veb);
4941         return I40E_SUCCESS;
4942 }
4943
4944 /* Setup a veb */
4945 static struct i40e_veb *
4946 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4947 {
4948         struct i40e_veb *veb;
4949         int ret;
4950         struct i40e_hw *hw;
4951
4952         if (pf == NULL) {
4953                 PMD_DRV_LOG(ERR,
4954                             "veb setup failed, associated PF shouldn't be NULL");
4955                 return NULL;
4956         }
4957         hw = I40E_PF_TO_HW(pf);
4958
4959         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4960         if (!veb) {
4961                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4962                 goto fail;
4963         }
4964
4965         veb->associate_vsi = vsi;
4966         veb->associate_pf = pf;
4967         TAILQ_INIT(&veb->head);
4968         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4969
4970         /* create floating veb if vsi is NULL */
4971         if (vsi != NULL) {
4972                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4973                                       I40E_DEFAULT_TCMAP, false,
4974                                       &veb->seid, false, NULL);
4975         } else {
4976                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4977                                       true, &veb->seid, false, NULL);
4978         }
4979
4980         if (ret != I40E_SUCCESS) {
4981                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4982                             hw->aq.asq_last_status);
4983                 goto fail;
4984         }
4985         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4986
4987         /* get statistics index */
4988         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4989                                 &veb->stats_idx, NULL, NULL, NULL);
4990         if (ret != I40E_SUCCESS) {
4991                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4992                             hw->aq.asq_last_status);
4993                 goto fail;
4994         }
4995         /* Get VEB bandwidth, to be implemented */
4996         /* The associated VSI now binds to the VEB; set its uplink to this VEB */
4997         if (vsi)
4998                 vsi->uplink_seid = veb->seid;
4999
5000         return veb;
5001 fail:
5002         rte_free(veb);
5003         return NULL;
5004 }
5005
5006 int
5007 i40e_vsi_release(struct i40e_vsi *vsi)
5008 {
5009         struct i40e_pf *pf;
5010         struct i40e_hw *hw;
5011         struct i40e_vsi_list *vsi_list;
5012         void *temp;
5013         int ret;
5014         struct i40e_mac_filter *f;
5015         uint16_t user_param;
5016
5017         if (!vsi)
5018                 return I40E_SUCCESS;
5019
5020         if (!vsi->adapter)
5021                 return -EFAULT;
5022
5023         user_param = vsi->user_param;
5024
5025         pf = I40E_VSI_TO_PF(vsi);
5026         hw = I40E_VSI_TO_HW(vsi);
5027
5028         /* VSI has children attached; release the children first */
5029         if (vsi->veb) {
5030                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5031                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5032                                 return -1;
5033                 }
5034                 i40e_veb_release(vsi->veb);
5035         }
5036
5037         if (vsi->floating_veb) {
5038                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5039                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5040                                 return -1;
5041                 }
5042         }
5043
5044         /* Remove all macvlan filters of the VSI */
5045         i40e_vsi_remove_all_macvlan_filter(vsi);
5046         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5047                 rte_free(f);
5048
5049         if (vsi->type != I40E_VSI_MAIN &&
5050             ((vsi->type != I40E_VSI_SRIOV) ||
5051             !pf->floating_veb_list[user_param])) {
5052                 /* Remove vsi from parent's sibling list */
5053                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5054                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5055                         return I40E_ERR_PARAM;
5056                 }
5057                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5058                                 &vsi->sib_vsi_list, list);
5059
5060                 /* Remove all switch elements of the VSI */
5061                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5062                 if (ret != I40E_SUCCESS)
5063                         PMD_DRV_LOG(ERR, "Failed to delete element");
5064         }
5065
5066         if ((vsi->type == I40E_VSI_SRIOV) &&
5067             pf->floating_veb_list[user_param]) {
5068                 /* Remove vsi from parent's sibling list */
5069                 if (vsi->parent_vsi == NULL ||
5070                     vsi->parent_vsi->floating_veb == NULL) {
5071                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5072                         return I40E_ERR_PARAM;
5073                 }
5074                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5075                              &vsi->sib_vsi_list, list);
5076
5077                 /* Remove all switch elements of the VSI */
5078                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5079                 if (ret != I40E_SUCCESS)
5080                         PMD_DRV_LOG(ERR, "Failed to delete element");
5081         }
5082
5083         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5084
5085         if (vsi->type != I40E_VSI_SRIOV)
5086                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5087         rte_free(vsi);
5088
5089         return I40E_SUCCESS;
5090 }
5091
5092 static int
5093 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5094 {
5095         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5096         struct i40e_aqc_remove_macvlan_element_data def_filter;
5097         struct i40e_mac_filter_info filter;
5098         int ret;
5099
5100         if (vsi->type != I40E_VSI_MAIN)
5101                 return I40E_ERR_CONFIG;
5102         memset(&def_filter, 0, sizeof(def_filter));
5103         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5104                                         ETH_ADDR_LEN);
5105         def_filter.vlan_tag = 0;
5106         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5107                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5108         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5109         if (ret != I40E_SUCCESS) {
5110                 struct i40e_mac_filter *f;
5111                 struct ether_addr *mac;
5112
5113                 PMD_DRV_LOG(DEBUG,
5114                             "Cannot remove the default macvlan filter");
5115                 /* Need to add the permanent MAC into the MAC list */
5116                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5117                 if (f == NULL) {
5118                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5119                         return I40E_ERR_NO_MEMORY;
5120                 }
5121                 mac = &f->mac_info.mac_addr;
5122                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5123                                 ETH_ADDR_LEN);
5124                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5125                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5126                 vsi->mac_num++;
5127
5128                 return ret;
5129         }
5130         rte_memcpy(&filter.mac_addr,
5131                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5132         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5133         return i40e_vsi_add_mac(vsi, &filter);
5134 }
5135
5136 /*
5137  * i40e_vsi_get_bw_config - Query VSI BW Information
5138  * @vsi: the VSI to be queried
5139  *
5140  * Returns 0 on success, negative value on failure
5141  */
5142 static enum i40e_status_code
5143 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5144 {
5145         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5146         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5147         struct i40e_hw *hw = &vsi->adapter->hw;
5148         i40e_status ret;
5149         int i;
5150         uint32_t bw_max;
5151
5152         memset(&bw_config, 0, sizeof(bw_config));
5153         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5154         if (ret != I40E_SUCCESS) {
5155                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5156                             hw->aq.asq_last_status);
5157                 return ret;
5158         }
5159
5160         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5161         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5162                                         &ets_sla_config, NULL);
5163         if (ret != I40E_SUCCESS) {
5164                 PMD_DRV_LOG(ERR,
5165                         "VSI failed to get TC bandwidth configuration %u",
5166                         hw->aq.asq_last_status);
5167                 return ret;
5168         }
5169
5170         /* store and print out BW info */
5171         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5172         vsi->bw_info.bw_max = bw_config.max_bw;
5173         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5174         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5175         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5176                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5177                      I40E_16_BIT_WIDTH);
5178         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5179                 vsi->bw_info.bw_ets_share_credits[i] =
5180                                 ets_sla_config.share_credits[i];
5181                 vsi->bw_info.bw_ets_credits[i] =
5182                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5183                 /* 4 bits per TC, 4th bit is reserved */
5184                 vsi->bw_info.bw_ets_max[i] =
5185                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5186                                   RTE_LEN2MASK(3, uint8_t));
5187                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5188                             vsi->bw_info.bw_ets_share_credits[i]);
5189                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5190                             vsi->bw_info.bw_ets_credits[i]);
5191                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5192                             vsi->bw_info.bw_ets_max[i]);
5193         }
5194
5195         return I40E_SUCCESS;
5196 }
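
/* A minimal standalone sketch (illustration only, not driver code) of the
 * per-TC decoding performed above: the two little-endian 16-bit tc_bw_max
 * words form one 32-bit value holding eight 4-bit fields, one per traffic
 * class, of which only the low 3 bits carry the max-credit value.
 */
#include <stdint.h>
#include <stdio.h>

static void
example_unpack_tc_bw_max(uint16_t lo, uint16_t hi)
{
        uint32_t bw_max = (uint32_t)lo | ((uint32_t)hi << 16);
        int tc;

        for (tc = 0; tc < 8; tc++) {
                /* 4 bits per TC; the 4th bit is reserved */
                uint8_t max_credits = (bw_max >> (tc * 4)) & 0x7;

                printf("TC%d max credits: %u\n", tc, max_credits);
        }
}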
5197
5198 /* i40e_enable_pf_lb
5199  * @pf: pointer to the pf structure
5200  *
5201  * Allow loopback on the PF
5202  */
5203 static inline void
5204 i40e_enable_pf_lb(struct i40e_pf *pf)
5205 {
5206         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5207         struct i40e_vsi_context ctxt;
5208         int ret;
5209
5210         /* Enabling loopback requires the FW API of v5.0 or later */
5211         if (hw->aq.fw_maj_ver < 5) {
5212                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5213                 return;
5214         }
5215
5216         memset(&ctxt, 0, sizeof(ctxt));
5217         ctxt.seid = pf->main_vsi_seid;
5218         ctxt.pf_num = hw->pf_id;
5219         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5220         if (ret) {
5221                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5222                             ret, hw->aq.asq_last_status);
5223                 return;
5224         }
5225         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5226         ctxt.info.valid_sections =
5227                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5228         ctxt.info.switch_id |=
5229                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5230
5231         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5232         if (ret)
5233                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5234                             hw->aq.asq_last_status);
5235 }
5236
5237 /* Setup a VSI */
5238 struct i40e_vsi *
5239 i40e_vsi_setup(struct i40e_pf *pf,
5240                enum i40e_vsi_type type,
5241                struct i40e_vsi *uplink_vsi,
5242                uint16_t user_param)
5243 {
5244         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5245         struct i40e_vsi *vsi;
5246         struct i40e_mac_filter_info filter;
5247         int ret;
5248         struct i40e_vsi_context ctxt;
5249         struct ether_addr broadcast =
5250                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5251
5252         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5253             uplink_vsi == NULL) {
5254                 PMD_DRV_LOG(ERR,
5255                         "VSI setup failed, VSI link shouldn't be NULL");
5256                 return NULL;
5257         }
5258
5259         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5260                 PMD_DRV_LOG(ERR,
5261                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5262                 return NULL;
5263         }
5264
5265         /* Two situations:
5266          * 1. Type is not MAIN and the uplink VSI is not NULL: if the uplink
5267          *    VSI has not set up a VEB yet, create one under its veb field.
5268          * 2. Type is SRIOV and the uplink VSI is NULL: if no floating VEB
5269          *    exists yet, create one under the main VSI's floating_veb field.
5270          */
5271
5272         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5273             uplink_vsi->veb == NULL) {
5274                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5275
5276                 if (uplink_vsi->veb == NULL) {
5277                         PMD_DRV_LOG(ERR, "VEB setup failed");
5278                         return NULL;
5279                 }
5280                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5281                 i40e_enable_pf_lb(pf);
5282         }
5283
5284         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5285             pf->main_vsi->floating_veb == NULL) {
5286                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5287
5288                 if (pf->main_vsi->floating_veb == NULL) {
5289                         PMD_DRV_LOG(ERR, "VEB setup failed");
5290                         return NULL;
5291                 }
5292         }
5293
5294         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5295         if (!vsi) {
5296                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5297                 return NULL;
5298         }
5299         TAILQ_INIT(&vsi->mac_list);
5300         vsi->type = type;
5301         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5302         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5303         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5304         vsi->user_param = user_param;
5305         vsi->vlan_anti_spoof_on = 0;
5306         vsi->vlan_filter_on = 0;
5307         /* Allocate queues */
5308         switch (vsi->type) {
5309         case I40E_VSI_MAIN  :
5310                 vsi->nb_qps = pf->lan_nb_qps;
5311                 break;
5312         case I40E_VSI_SRIOV :
5313                 vsi->nb_qps = pf->vf_nb_qps;
5314                 break;
5315         case I40E_VSI_VMDQ2:
5316                 vsi->nb_qps = pf->vmdq_nb_qps;
5317                 break;
5318         case I40E_VSI_FDIR:
5319                 vsi->nb_qps = pf->fdir_nb_qps;
5320                 break;
5321         default:
5322                 goto fail_mem;
5323         }
5324         /*
5325          * The filter status descriptor is reported on RX queue 0,
5326          * while the TX queue used for FDIR filter programming has
5327          * no such constraint and could be any queue.
5328          * To keep it simple, the FDIR VSI uses queue pair 0.
5329          * To make sure it gets queue pair 0, its queue allocation
5330          * must be done before this function is called.
5331          */
5332         if (type != I40E_VSI_FDIR) {
5333                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5334                 if (ret < 0) {
5335                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5336                                         vsi->seid, ret);
5337                         goto fail_mem;
5338                 }
5339                 vsi->base_queue = ret;
5340         } else
5341                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5342
5343         /* A VF has its MSI-X interrupts in the VF range; don't allocate here */
5344         if (type == I40E_VSI_MAIN) {
5345                 if (pf->support_multi_driver) {
5346                         /* In multi-driver mode, INT0 must be used instead of
5347                          * allocating from the MSI-X pool. The pool starts at
5348                          * INT1, so it is fine to set msix_intr to 0 and nb_msix
5349                          * to 1 without calling i40e_res_pool_alloc.
5350                          */
5351                         vsi->msix_intr = 0;
5352                         vsi->nb_msix = 1;
5353                 } else {
5354                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5355                                                   RTE_MIN(vsi->nb_qps,
5356                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5357                         if (ret < 0) {
5358                                 PMD_DRV_LOG(ERR,
5359                                             "VSI MAIN %d get heap failed %d",
5360                                             vsi->seid, ret);
5361                                 goto fail_queue_alloc;
5362                         }
5363                         vsi->msix_intr = ret;
5364                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5365                                                RTE_MAX_RXTX_INTR_VEC_ID);
5366                 }
5367         } else if (type != I40E_VSI_SRIOV) {
5368                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5369                 if (ret < 0) {
5370                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5371                         goto fail_queue_alloc;
5372                 }
5373                 vsi->msix_intr = ret;
5374                 vsi->nb_msix = 1;
5375         } else {
5376                 vsi->msix_intr = 0;
5377                 vsi->nb_msix = 0;
5378         }
5379
5380         /* Add VSI */
5381         if (type == I40E_VSI_MAIN) {
5382                 /* The main VSI is the default one, so no need to add it */
5383                 vsi->uplink_seid = pf->mac_seid;
5384                 vsi->seid = pf->main_vsi_seid;
5385                 /* Bind queues to a specific MSI-X interrupt */
5386                 /**
5387                  * At least two interrupts are needed: one for the misc
5388                  * cause, enabled from the OS side, and another for binding
5389                  * queues to interrupts from the device side only.
5390                  */
5391
5392                 /* Get default VSI parameters from hardware */
5393                 memset(&ctxt, 0, sizeof(ctxt));
5394                 ctxt.seid = vsi->seid;
5395                 ctxt.pf_num = hw->pf_id;
5396                 ctxt.uplink_seid = vsi->uplink_seid;
5397                 ctxt.vf_num = 0;
5398                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5399                 if (ret != I40E_SUCCESS) {
5400                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5401                         goto fail_msix_alloc;
5402                 }
5403                 rte_memcpy(&vsi->info, &ctxt.info,
5404                         sizeof(struct i40e_aqc_vsi_properties_data));
5405                 vsi->vsi_id = ctxt.vsi_number;
5406                 vsi->info.valid_sections = 0;
5407
5408                 /* Configure TCs; only TC0 is enabled */
5409                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5410                         I40E_SUCCESS) {
5411                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5412                         goto fail_msix_alloc;
5413                 }
5414
5415                 /* TC, queue mapping */
5416                 memset(&ctxt, 0, sizeof(ctxt));
5417                 vsi->info.valid_sections |=
5418                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5419                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5420                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5421                 rte_memcpy(&ctxt.info, &vsi->info,
5422                         sizeof(struct i40e_aqc_vsi_properties_data));
5423                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5424                                                 I40E_DEFAULT_TCMAP);
5425                 if (ret != I40E_SUCCESS) {
5426                         PMD_DRV_LOG(ERR,
5427                                 "Failed to configure TC queue mapping");
5428                         goto fail_msix_alloc;
5429                 }
5430                 ctxt.seid = vsi->seid;
5431                 ctxt.pf_num = hw->pf_id;
5432                 ctxt.uplink_seid = vsi->uplink_seid;
5433                 ctxt.vf_num = 0;
5434
5435                 /* Update VSI parameters */
5436                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5437                 if (ret != I40E_SUCCESS) {
5438                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5439                         goto fail_msix_alloc;
5440                 }
5441
5442                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5443                                                 sizeof(vsi->info.tc_mapping));
5444                 rte_memcpy(&vsi->info.queue_mapping,
5445                                 &ctxt.info.queue_mapping,
5446                         sizeof(vsi->info.queue_mapping));
5447                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5448                 vsi->info.valid_sections = 0;
5449
5450                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5451                                 ETH_ADDR_LEN);
5452
5453                 /**
5454                  * Updating the default filter settings is necessary to
5455                  * prevent reception of tagged packets.
5456                  * Some old firmware configurations load a default macvlan
5457                  * filter which accepts both tagged and untagged packets.
5458                  * The update replaces it with a normal filter if needed.
5459                  * For NVM 4.2.2 or later the update is no longer needed:
5460                  * firmware with a correct configuration loads the expected
5461                  * default macvlan filter, which cannot be removed.
5462                  */
5463                 i40e_update_default_filter_setting(vsi);
5464                 i40e_config_qinq(hw, vsi);
5465         } else if (type == I40E_VSI_SRIOV) {
5466                 memset(&ctxt, 0, sizeof(ctxt));
5467                 /**
5468                  * For other VSIs, the uplink_seid equals the uplink VSI's
5469                  * uplink_seid since they share the same VEB.
5470                  */
5471                 if (uplink_vsi == NULL)
5472                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5473                 else
5474                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5475                 ctxt.pf_num = hw->pf_id;
5476                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5477                 ctxt.uplink_seid = vsi->uplink_seid;
5478                 ctxt.connection_type = 0x1;
5479                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5480
5481                 /* Use the VEB configuration if FW >= v5.0 */
5482                 if (hw->aq.fw_maj_ver >= 5) {
5483                         /* Configure switch ID */
5484                         ctxt.info.valid_sections |=
5485                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5486                         ctxt.info.switch_id =
5487                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5488                 }
5489
5490                 /* Configure port/vlan */
5491                 ctxt.info.valid_sections |=
5492                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5493                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5494                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5495                                                 hw->func_caps.enabled_tcmap);
5496                 if (ret != I40E_SUCCESS) {
5497                         PMD_DRV_LOG(ERR,
5498                                 "Failed to configure TC queue mapping");
5499                         goto fail_msix_alloc;
5500                 }
5501
5502                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5503                 ctxt.info.valid_sections |=
5504                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5505                 /**
5506                  * Since the VSI is not created yet, only configure its
5507                  * parameters here; it will be added below.
5508                  */
5509
5510                 i40e_config_qinq(hw, vsi);
5511         } else if (type == I40E_VSI_VMDQ2) {
5512                 memset(&ctxt, 0, sizeof(ctxt));
5513                 /*
5514                  * For other VSIs, the uplink_seid equals the uplink VSI's
5515                  * uplink_seid since they share the same VEB.
5516                  */
5517                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5518                 ctxt.pf_num = hw->pf_id;
5519                 ctxt.vf_num = 0;
5520                 ctxt.uplink_seid = vsi->uplink_seid;
5521                 ctxt.connection_type = 0x1;
5522                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5523
5524                 ctxt.info.valid_sections |=
5525                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5526                 /* user_param carries the flag to enable loopback */
5527                 if (user_param) {
5528                         ctxt.info.switch_id =
5529                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5530                         ctxt.info.switch_id |=
5531                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5532                 }
5533
5534                 /* Configure port/vlan */
5535                 ctxt.info.valid_sections |=
5536                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5537                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5538                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5539                                                 I40E_DEFAULT_TCMAP);
5540                 if (ret != I40E_SUCCESS) {
5541                         PMD_DRV_LOG(ERR,
5542                                 "Failed to configure TC queue mapping");
5543                         goto fail_msix_alloc;
5544                 }
5545                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5546                 ctxt.info.valid_sections |=
5547                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5548         } else if (type == I40E_VSI_FDIR) {
5549                 memset(&ctxt, 0, sizeof(ctxt));
5550                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5551                 ctxt.pf_num = hw->pf_id;
5552                 ctxt.vf_num = 0;
5553                 ctxt.uplink_seid = vsi->uplink_seid;
5554                 ctxt.connection_type = 0x1;     /* regular data port */
5555                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5556                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5557                                                 I40E_DEFAULT_TCMAP);
5558                 if (ret != I40E_SUCCESS) {
5559                         PMD_DRV_LOG(ERR,
5560                                 "Failed to configure TC queue mapping.");
5561                         goto fail_msix_alloc;
5562                 }
5563                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5564                 ctxt.info.valid_sections |=
5565                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5566         } else {
5567                 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5568                 goto fail_msix_alloc;
5569         }
5570
5571         if (vsi->type != I40E_VSI_MAIN) {
5572                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5573                 if (ret != I40E_SUCCESS) {
5574                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5575                                     hw->aq.asq_last_status);
5576                         goto fail_msix_alloc;
5577                 }
5578                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5579                 vsi->info.valid_sections = 0;
5580                 vsi->seid = ctxt.seid;
5581                 vsi->vsi_id = ctxt.vsi_number;
5582                 vsi->sib_vsi_list.vsi = vsi;
5583                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5584                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5585                                           &vsi->sib_vsi_list, list);
5586                 } else {
5587                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5588                                           &vsi->sib_vsi_list, list);
5589                 }
5590         }
5591
5592         /* MAC/VLAN configuration */
5593         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5594         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5595
5596         ret = i40e_vsi_add_mac(vsi, &filter);
5597         if (ret != I40E_SUCCESS) {
5598                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5599                 goto fail_msix_alloc;
5600         }
5601
5602         /* Get VSI BW information */
5603         i40e_vsi_get_bw_config(vsi);
5604         return vsi;
5605 fail_msix_alloc:
5606         i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
5607 fail_queue_alloc:
5608         i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
5609 fail_mem:
5610         rte_free(vsi);
5611         return NULL;
5612 }
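
/* Hedged usage sketch: i40e_vsi_setup() takes the PF, the VSI type, an
 * uplink VSI (NULL only for MAIN, or for SRIOV on a floating VEB), and a
 * type-specific user_param. The helper below is illustrative rather than
 * part of the driver; it mirrors the VMDQ2 call made by i40e_vmdq_setup()
 * further down, where user_param carries the loopback-enable flag.
 */
static struct i40e_vsi *
example_create_vmdq_vsi(struct i40e_pf *pf, uint16_t enable_loopback)
{
        return i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
                              enable_loopback);
}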
5613
5614 /* Configure vlan filter on or off */
5615 int
5616 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5617 {
5618         int i, num;
5619         struct i40e_mac_filter *f;
5620         void *temp;
5621         struct i40e_mac_filter_info *mac_filter;
5622         enum rte_mac_filter_type desired_filter;
5623         int ret = I40E_SUCCESS;
5624
5625         if (on) {
5626                 /* Filter to match MAC and VLAN */
5627                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5628         } else {
5629                 /* Filter to match only MAC */
5630                 desired_filter = RTE_MAC_PERFECT_MATCH;
5631         }
5632
5633         num = vsi->mac_num;
5634
5635         mac_filter = rte_zmalloc("mac_filter_info_data",
5636                                  num * sizeof(*mac_filter), 0);
5637         if (mac_filter == NULL) {
5638                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5639                 return I40E_ERR_NO_MEMORY;
5640         }
5641
5642         i = 0;
5643
5644         /* Remove all existing MAC filters */
5645         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5646                 mac_filter[i] = f->mac_info;
5647                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5648                 if (ret) {
5649                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5650                                     on ? "enable" : "disable");
5651                         goto DONE;
5652                 }
5653                 i++;
5654         }
5655
5656         /* Re-add them with the new filter type */
5657         for (i = 0; i < num; i++) {
5658                 mac_filter[i].filter_type = desired_filter;
5659                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5660                 if (ret) {
5661                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5662                                     on ? "enable" : "disable");
5663                         goto DONE;
5664                 }
5665         }
5666
5667 DONE:
5668         rte_free(mac_filter);
5669         return ret;
5670 }
5671
5672 /* Configure vlan stripping on or off */
5673 int
5674 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5675 {
5676         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5677         struct i40e_vsi_context ctxt;
5678         uint8_t vlan_flags;
5679         int ret = I40E_SUCCESS;
5680
5681         /* Check whether it is already on or off */
5682         if (vsi->info.valid_sections &
5683                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5684                 if (on) {
5685                         if ((vsi->info.port_vlan_flags &
5686                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5687                                 return 0; /* already on */
5688                 } else {
5689                         if ((vsi->info.port_vlan_flags &
5690                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5691                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5692                                 return 0; /* already off */
5693                 }
5694         }
5695
5696         if (on)
5697                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5698         else
5699                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5700         vsi->info.valid_sections =
5701                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5702         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5703         vsi->info.port_vlan_flags |= vlan_flags;
5704         ctxt.seid = vsi->seid;
5705         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5706         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5707         if (ret)
5708                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5709                             on ? "enable" : "disable");
5710
5711         return ret;
5712 }
5713
5714 static int
5715 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5716 {
5717         struct rte_eth_dev_data *data = dev->data;
5718         int ret;
5719         int mask = 0;
5720
5721         /* Apply vlan offload setting */
5722         mask = ETH_VLAN_STRIP_MASK |
5723                ETH_VLAN_FILTER_MASK |
5724                ETH_VLAN_EXTEND_MASK;
5725         ret = i40e_vlan_offload_set(dev, mask);
5726         if (ret) {
5727                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5728                 return ret;
5729         }
5730
5731         /* Apply pvid setting */
5732         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5733                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5734         if (ret)
5735                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5736
5737         return ret;
5738 }
5739
5740 static int
5741 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5742 {
5743         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5744
5745         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5746 }
5747
5748 static int
5749 i40e_update_flow_control(struct i40e_hw *hw)
5750 {
5751 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5752         struct i40e_link_status link_status;
5753         uint32_t rxfc = 0, txfc = 0, reg;
5754         uint8_t an_info;
5755         int ret;
5756
5757         memset(&link_status, 0, sizeof(link_status));
5758         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5759         if (ret != I40E_SUCCESS) {
5760                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5761                 goto write_reg; /* Disable flow control */
5762         }
5763
5764         an_info = hw->phy.link_info.an_info;
5765         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5766                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5767                 ret = I40E_ERR_NOT_READY;
5768                 goto write_reg; /* Disable flow control */
5769         }
5770         /**
5771          * If link auto negotiation is enabled, flow control needs to
5772          * be configured according to it
5773          */
5774         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5775         case I40E_LINK_PAUSE_RXTX:
5776                 rxfc = 1;
5777                 txfc = 1;
5778                 hw->fc.current_mode = I40E_FC_FULL;
5779                 break;
5780         case I40E_AQ_LINK_PAUSE_RX:
5781                 rxfc = 1;
5782                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5783                 break;
5784         case I40E_AQ_LINK_PAUSE_TX:
5785                 txfc = 1;
5786                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5787                 break;
5788         default:
5789                 hw->fc.current_mode = I40E_FC_NONE;
5790                 break;
5791         }
5792
5793 write_reg:
5794         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5795                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5796         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5797         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5798         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5799         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5800
5801         return ret;
5802 }
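
/* Standalone sketch (illustration only) of the pause-bit decoding in the
 * switch above: the negotiated RX/TX pause bits map one-to-one onto the
 * four flow-control modes. The enum below is hypothetical, standing in
 * for the driver's I40E_FC_* values.
 */
#include <stdbool.h>

enum example_fc_mode {
        EX_FC_NONE, EX_FC_RX_PAUSE, EX_FC_TX_PAUSE, EX_FC_FULL
};

static enum example_fc_mode
example_decode_pause(bool rx_pause, bool tx_pause)
{
        if (rx_pause && tx_pause)
                return EX_FC_FULL;
        if (rx_pause)
                return EX_FC_RX_PAUSE;
        if (tx_pause)
                return EX_FC_TX_PAUSE;
        return EX_FC_NONE;
}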
5803
5804 /* PF setup */
5805 static int
5806 i40e_pf_setup(struct i40e_pf *pf)
5807 {
5808         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5809         struct i40e_filter_control_settings settings;
5810         struct i40e_vsi *vsi;
5811         int ret;
5812
5813         /* Clear all stats counters */
5814         pf->offset_loaded = FALSE;
5815         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5816         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5817         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5818         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5819
5820         ret = i40e_pf_get_switch_config(pf);
5821         if (ret != I40E_SUCCESS) {
5822                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5823                 return ret;
5824         }
5825
5826         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
5827         if (ret)
5828                 PMD_INIT_LOG(WARNING,
5829                         "failed to allocate switch domain for device %d", ret);
5830
5831         if (pf->flags & I40E_FLAG_FDIR) {
5832                 /* Allocate queues first so FDIR can use queue pair 0 */
5833                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5834                 if (ret != I40E_FDIR_QUEUE_ID) {
5835                         PMD_DRV_LOG(ERR,
5836                                 "queue allocation fails for FDIR: ret =%d",
5837                                 ret);
5838                         pf->flags &= ~I40E_FLAG_FDIR;
5839                 }
5840         }
5841         /* Main VSI setup */
5842         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5843         if (!vsi) {
5844                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5845                 return I40E_ERR_NOT_READY;
5846         }
5847         pf->main_vsi = vsi;
5848
5849         /* Configure filter control */
5850         memset(&settings, 0, sizeof(settings));
5851         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5852                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5853         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5854                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5855         else {
5856                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5857                         hw->func_caps.rss_table_size);
5858                 return I40E_ERR_PARAM;
5859         }
5860         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5861                 hw->func_caps.rss_table_size);
5862         pf->hash_lut_size = hw->func_caps.rss_table_size;
5863
5864         /* Enable ethtype and macvlan filters */
5865         settings.enable_ethtype = TRUE;
5866         settings.enable_macvlan = TRUE;
5867         ret = i40e_set_filter_control(hw, &settings);
5868         if (ret)
5869                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5870                                                                 ret);
5871
5872         /* Update flow control according to the auto negotiation */
5873         i40e_update_flow_control(hw);
5874
5875         return I40E_SUCCESS;
5876 }
5877
5878 int
5879 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5880 {
5881         uint32_t reg;
5882         uint16_t j;
5883
5884         /**
5885          * Set or clear TX Queue Disable flags,
5886          * which is required by hardware.
5887          */
5888         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5889         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5890
5891         /* Wait until the request is finished */
5892         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5893                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5894                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5895                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5896                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5897                                                         & 0x1))) {
5898                         break;
5899                 }
5900         }
5901         if (on) {
5902                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5903                         return I40E_SUCCESS; /* already on, skip next steps */
5904
5905                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5906                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5907         } else {
5908                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5909                         return I40E_SUCCESS; /* already off, skip next steps */
5910                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5911         }
5912         /* Write the register */
5913         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5914         /* Check the result */
5915         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5916                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5917                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5918                 if (on) {
5919                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5920                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5921                                 break;
5922                 } else {
5923                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5924                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5925                                 break;
5926                 }
5927         }
5928         /* Check whether it timed out */
5929         if (j >= I40E_CHK_Q_ENA_COUNT) {
5930                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5931                             (on ? "enable" : "disable"), q_idx);
5932                 return I40E_ERR_TIMEOUT;
5933         }
5934
5935         return I40E_SUCCESS;
5936 }
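
/* Standalone sketch (illustration only) of the REQ/STAT handshake polled
 * above: hardware copies the enable-request bit into the enable-status bit
 * once the queue state change completes, so the state has settled exactly
 * when the two bits agree.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
example_qena_settled(uint32_t reg, uint32_t req_mask, uint32_t stat_mask)
{
        /* Settled when the request and status bits match */
        return !!(reg & req_mask) == !!(reg & stat_mask);
}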
5937
5938 /* Switch the tx queues on or off */
5939 static int
5940 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5941 {
5942         struct rte_eth_dev_data *dev_data = pf->dev_data;
5943         struct i40e_tx_queue *txq;
5944         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5945         uint16_t i;
5946         int ret;
5947
5948         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5949                 txq = dev_data->tx_queues[i];
5950                 /* Skip queues that are not configured, or that are
5951                  * deferred-start when starting all queues */
5952                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5953                         continue;
5954                 if (on)
5955                         ret = i40e_dev_tx_queue_start(dev, i);
5956                 else
5957                         ret = i40e_dev_tx_queue_stop(dev, i);
5958                 if (ret != I40E_SUCCESS)
5959                         return ret;
5960         }
5961
5962         return I40E_SUCCESS;
5963 }
5964
5965 int
5966 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5967 {
5968         uint32_t reg;
5969         uint16_t j;
5970
5971         /* Wait until the request is finished */
5972         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5973                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5974                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5975                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5976                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5977                         break;
5978         }
5979
5980         if (on) {
5981                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5982                         return I40E_SUCCESS; /* Already on, skip next steps */
5983                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5984         } else {
5985                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5986                         return I40E_SUCCESS; /* Already off, skip next steps */
5987                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5988         }
5989
5990         /* Write the register */
5991         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5992         /* Check the result */
5993         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5994                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5995                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5996                 if (on) {
5997                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5998                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5999                                 break;
6000                 } else {
6001                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6002                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6003                                 break;
6004                 }
6005         }
6006
6007         /* Check whether it timed out */
6008         if (j >= I40E_CHK_Q_ENA_COUNT) {
6009                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6010                             (on ? "enable" : "disable"), q_idx);
6011                 return I40E_ERR_TIMEOUT;
6012         }
6013
6014         return I40E_SUCCESS;
6015 }
6016 /* Switch on or off the rx queues */
6017 static int
6018 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
6019 {
6020         struct rte_eth_dev_data *dev_data = pf->dev_data;
6021         struct i40e_rx_queue *rxq;
6022         struct rte_eth_dev *dev = pf->adapter->eth_dev;
6023         uint16_t i;
6024         int ret;
6025
6026         for (i = 0; i < dev_data->nb_rx_queues; i++) {
6027                 rxq = dev_data->rx_queues[i];
6028                 /* Skip queues that are not configured, or that are
6029                  * deferred-start when starting all queues */
6030                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
6031                         continue;
6032                 if (on)
6033                         ret = i40e_dev_rx_queue_start(dev, i);
6034                 else
6035                         ret = i40e_dev_rx_queue_stop(dev, i);
6036                 if (ret != I40E_SUCCESS)
6037                         return ret;
6038         }
6039
6040         return I40E_SUCCESS;
6041 }
6042
6043 /* Switch on or off all the rx/tx queues */
6044 int
6045 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
6046 {
6047         int ret;
6048
6049         if (on) {
6050                 /* enable rx queues before enabling tx queues */
6051                 ret = i40e_dev_switch_rx_queues(pf, on);
6052                 if (ret) {
6053                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
6054                         return ret;
6055                 }
6056                 ret = i40e_dev_switch_tx_queues(pf, on);
6057         } else {
6058                 /* Stop tx queues before stopping rx queues */
6059                 ret = i40e_dev_switch_tx_queues(pf, on);
6060                 if (ret) {
6061                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
6062                         return ret;
6063                 }
6064                 ret = i40e_dev_switch_rx_queues(pf, on);
6065         }
6066
6067         return ret;
6068 }
6069
6070 /* Initialize VSI for TX */
6071 static int
6072 i40e_dev_tx_init(struct i40e_pf *pf)
6073 {
6074         struct rte_eth_dev_data *data = pf->dev_data;
6075         uint16_t i;
6076         uint32_t ret = I40E_SUCCESS;
6077         struct i40e_tx_queue *txq;
6078
6079         for (i = 0; i < data->nb_tx_queues; i++) {
6080                 txq = data->tx_queues[i];
6081                 if (!txq || !txq->q_set)
6082                         continue;
6083                 ret = i40e_tx_queue_init(txq);
6084                 if (ret != I40E_SUCCESS)
6085                         break;
6086         }
6087         if (ret == I40E_SUCCESS)
6088                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6089                                      ->eth_dev);
6090
6091         return ret;
6092 }
6093
6094 /* Initialize VSI for RX */
6095 static int
6096 i40e_dev_rx_init(struct i40e_pf *pf)
6097 {
6098         struct rte_eth_dev_data *data = pf->dev_data;
6099         int ret = I40E_SUCCESS;
6100         uint16_t i;
6101         struct i40e_rx_queue *rxq;
6102
6103         i40e_pf_config_mq_rx(pf);
6104         for (i = 0; i < data->nb_rx_queues; i++) {
6105                 rxq = data->rx_queues[i];
6106                 if (!rxq || !rxq->q_set)
6107                         continue;
6108
6109                 ret = i40e_rx_queue_init(rxq);
6110                 if (ret != I40E_SUCCESS) {
6111                         PMD_DRV_LOG(ERR,
6112                                 "Failed to do RX queue initialization");
6113                         break;
6114                 }
6115         }
6116         if (ret == I40E_SUCCESS)
6117                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6118                                      ->eth_dev);
6119
6120         return ret;
6121 }
6122
6123 static int
6124 i40e_dev_rxtx_init(struct i40e_pf *pf)
6125 {
6126         int err;
6127
6128         err = i40e_dev_tx_init(pf);
6129         if (err) {
6130                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6131                 return err;
6132         }
6133         err = i40e_dev_rx_init(pf);
6134         if (err) {
6135                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6136                 return err;
6137         }
6138
6139         return err;
6140 }
6141
6142 static int
6143 i40e_vmdq_setup(struct rte_eth_dev *dev)
6144 {
6145         struct rte_eth_conf *conf = &dev->data->dev_conf;
6146         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6147         int i, err, conf_vsis, j, loop;
6148         struct i40e_vsi *vsi;
6149         struct i40e_vmdq_info *vmdq_info;
6150         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6151         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6152
6153         /*
6154          * Disable interrupts to avoid messages from VFs. This also avoids
6155          * race conditions during VSI creation/destruction.
6156          */
6157         i40e_pf_disable_irq0(hw);
6158
6159         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6160                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6161                 return -ENOTSUP;
6162         }
6163
6164         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6165         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6166                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6167                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6168                         pf->max_nb_vmdq_vsi);
6169                 return -ENOTSUP;
6170         }
6171
6172         if (pf->vmdq != NULL) {
6173                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6174                 return 0;
6175         }
6176
6177         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6178                                 sizeof(*vmdq_info) * conf_vsis, 0);
6179
6180         if (pf->vmdq == NULL) {
6181                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6182                 return -ENOMEM;
6183         }
6184
6185         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6186
6187         /* Create VMDQ VSI */
6188         for (i = 0; i < conf_vsis; i++) {
6189                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6190                                 vmdq_conf->enable_loop_back);
6191                 if (vsi == NULL) {
6192                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6193                         err = -1;
6194                         goto err_vsi_setup;
6195                 }
6196                 vmdq_info = &pf->vmdq[i];
6197                 vmdq_info->pf = pf;
6198                 vmdq_info->vsi = vsi;
6199         }
6200         pf->nb_cfg_vmdq_vsi = conf_vsis;
6201
6202         /* Configure VLAN */
6203         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6204         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6205                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6206                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6207                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6208                                         vmdq_conf->pool_map[i].vlan_id, j);
6209
6210                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6211                                                 vmdq_conf->pool_map[i].vlan_id);
6212                                 if (err) {
6213                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6214                                         err = -1;
6215                                         goto err_vsi_setup;
6216                                 }
6217                         }
6218                 }
6219         }
6220
6221         i40e_pf_enable_irq0(hw);
6222
6223         return 0;
6224
6225 err_vsi_setup:
6226         for (i = 0; i < conf_vsis; i++)
6227                 if (pf->vmdq[i].vsi == NULL)
6228                         break;
6229                 else
6230                         i40e_vsi_release(pf->vmdq[i].vsi);
6231
6232         rte_free(pf->vmdq);
6233         pf->vmdq = NULL;
6234         i40e_pf_enable_irq0(hw);
6235         return err;
6236 }
6237
6238 static void
6239 i40e_stat_update_32(struct i40e_hw *hw,
6240                    uint32_t reg,
6241                    bool offset_loaded,
6242                    uint64_t *offset,
6243                    uint64_t *stat)
6244 {
6245         uint64_t new_data;
6246
6247         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6248         if (!offset_loaded)
6249                 *offset = new_data;
6250
6251         if (new_data >= *offset)
6252                 *stat = (uint64_t)(new_data - *offset);
6253         else
6254                 *stat = (uint64_t)((new_data +
6255                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6256 }
6257
6258 static void
6259 i40e_stat_update_48(struct i40e_hw *hw,
6260                    uint32_t hireg,
6261                    uint32_t loreg,
6262                    bool offset_loaded,
6263                    uint64_t *offset,
6264                    uint64_t *stat)
6265 {
6266         uint64_t new_data;
6267
6268         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6269         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6270                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6271
6272         if (!offset_loaded)
6273                 *offset = new_data;
6274
6275         if (new_data >= *offset)
6276                 *stat = new_data - *offset;
6277         else
6278                 *stat = (uint64_t)((new_data +
6279                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6280
6281         *stat &= I40E_48_BIT_MASK;
6282 }
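
/* Standalone sketch (illustration only) of the 48-bit rollover handling
 * above: a counter that wrapped reads lower than the stored offset, so one
 * full 2^48 period is added back before subtracting and masking. For
 * example, offset 0xFFFFFFFFFFFE and a new reading of 0x1 yield a delta
 * of 3.
 */
#include <stdint.h>

static uint64_t
example_delta48(uint64_t new_data, uint64_t offset)
{
        uint64_t stat;

        if (new_data >= offset)
                stat = new_data - offset;
        else
                stat = new_data + ((uint64_t)1 << 48) - offset;

        return stat & (((uint64_t)1 << 48) - 1);
}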
6283
6284 /* Disable IRQ0 */
6285 void
6286 i40e_pf_disable_irq0(struct i40e_hw *hw)
6287 {
6288         /* Disable all interrupt types */
6289         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6290                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6291         I40E_WRITE_FLUSH(hw);
6292 }
6293
6294 /* Enable IRQ0 */
6295 void
6296 i40e_pf_enable_irq0(struct i40e_hw *hw)
6297 {
6298         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6299                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6300                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6301                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6302         I40E_WRITE_FLUSH(hw);
6303 }
6304
6305 static void
6306 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6307 {
6308         /* read pending request and disable first */
6309         i40e_pf_disable_irq0(hw);
6310         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6311         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6312                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6313
6314         if (no_queue)
6315                 /* Link no queues with irq0 */
6316                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6317                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6318 }
6319
6320 static void
6321 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6322 {
6323         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6324         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6325         int i;
6326         uint16_t abs_vf_id;
6327         uint32_t index, offset, val;
6328
6329         if (!pf->vfs)
6330                 return;
6331         /**
6332          * Try to find which VF triggered a reset; use the absolute VF id
6333          * since the register is a global one.
6334          */
6335         for (i = 0; i < pf->vf_num; i++) {
6336                 abs_vf_id = hw->func_caps.vf_base_id + i;
6337                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6338                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6339                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6340                 /* VFR event occurred */
6341                 if (val & (0x1 << offset)) {
6342                         int ret;
6343
6344                         /* Clear the event first */
6345                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6346                                                         (0x1 << offset));
6347                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6348                         /**
6349                          * Only notify the VF that a reset event occurred;
6350                          * don't trigger another SW reset.
6351                          */
6352                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6353                         if (ret != I40E_SUCCESS)
6354                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6355                 }
6356         }
6357 }
6358
6359 static void
6360 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6361 {
6362         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6363         int i;
6364
6365         for (i = 0; i < pf->vf_num; i++)
6366                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6367 }
6368
6369 static void
6370 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6371 {
6372         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6373         struct i40e_arq_event_info info;
6374         uint16_t pending, opcode;
6375         int ret;
6376
6377         info.buf_len = I40E_AQ_BUF_SZ;
6378         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6379         if (!info.msg_buf) {
6380                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6381                 return;
6382         }
6383
6384         pending = 1;
6385         while (pending) {
6386                 ret = i40e_clean_arq_element(hw, &info, &pending);
6387
6388                 if (ret != I40E_SUCCESS) {
6389                         PMD_DRV_LOG(INFO,
6390                                 "Failed to read msg from AdminQ, aq_err: %u",
6391                                 hw->aq.asq_last_status);
6392                         break;
6393                 }
6394                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6395
6396                 switch (opcode) {
6397                 case i40e_aqc_opc_send_msg_to_pf:
6398                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6399                         i40e_pf_host_handle_vf_msg(dev,
6400                                         rte_le_to_cpu_16(info.desc.retval),
6401                                         rte_le_to_cpu_32(info.desc.cookie_high),
6402                                         rte_le_to_cpu_32(info.desc.cookie_low),
6403                                         info.msg_buf,
6404                                         info.msg_len);
6405                         break;
6406                 case i40e_aqc_opc_get_link_status:
6407                         ret = i40e_dev_link_update(dev, 0);
6408                         if (!ret)
6409                                 _rte_eth_dev_callback_process(dev,
6410                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6411                         break;
6412                 default:
6413                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6414                                     opcode);
6415                         break;
6416                 }
6417         }
6418         rte_free(info.msg_buf);
6419 }
6420
6421 /**
6422  * Interrupt handler triggered by the NIC for handling a
6423  * specific interrupt.
6424  *
6425  * @param param
6426  *  The address of the parameter (struct rte_eth_dev *)
6427  *  registered before.
6428  *
6429  * @return
6430  *  void
6431  */
6433 static void
6434 i40e_dev_interrupt_handler(void *param)
6435 {
6436         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6437         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6438         uint32_t icr0;
6439
6440         /* Disable interrupt */
6441         i40e_pf_disable_irq0(hw);
6442
6443         /* read out interrupt causes */
6444         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6445
6446         /* No interrupt event indicated */
6447         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6448                 PMD_DRV_LOG(INFO, "No interrupt event");
6449                 goto done;
6450         }
6451         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6452                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6453         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6454                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6455         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6456                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6457         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6458                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6459         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6460                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6461         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6462                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6463         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6464                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6465
6466         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6467                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6468                 i40e_dev_handle_vfr_event(dev);
6469         }
6470         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6471                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6472                 i40e_dev_handle_aq_msg(dev);
6473         }
6474
6475 done:
6476         /* Enable interrupt */
6477         i40e_pf_enable_irq0(hw);
6478         rte_intr_enable(dev->intr_handle);
6479 }
6480
6481 int
6482 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6483                          struct i40e_macvlan_filter *filter,
6484                          int total)
6485 {
6486         int ele_num, ele_buff_size;
6487         int num, actual_num, i;
6488         uint16_t flags;
6489         int ret = I40E_SUCCESS;
6490         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6491         struct i40e_aqc_add_macvlan_element_data *req_list;
6492
6493         if (filter == NULL || total == 0)
6494                 return I40E_ERR_PARAM;
6495         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6496         ele_buff_size = hw->aq.asq_buf_size;
6497
6498         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6499         if (req_list == NULL) {
6500                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6501                 return I40E_ERR_NO_MEMORY;
6502         }
6503
6504         num = 0;
6505         do {
6506                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6507                 memset(req_list, 0, ele_buff_size);
6508
6509                 for (i = 0; i < actual_num; i++) {
6510                         rte_memcpy(req_list[i].mac_addr,
6511                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6512                         req_list[i].vlan_tag =
6513                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6514
6515                         switch (filter[num + i].filter_type) {
6516                         case RTE_MAC_PERFECT_MATCH:
6517                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6518                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6519                                 break;
6520                         case RTE_MACVLAN_PERFECT_MATCH:
6521                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6522                                 break;
6523                         case RTE_MAC_HASH_MATCH:
6524                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6525                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6526                                 break;
6527                         case RTE_MACVLAN_HASH_MATCH:
6528                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6529                                 break;
6530                         default:
6531                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6532                                 ret = I40E_ERR_PARAM;
6533                                 goto DONE;
6534                         }
6535
6536                         req_list[i].queue_number = 0;
6537
6538                         req_list[i].flags = rte_cpu_to_le_16(flags);
6539                 }
6540
6541                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6542                                                 actual_num, NULL);
6543                 if (ret != I40E_SUCCESS) {
6544                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6545                         goto DONE;
6546                 }
6547                 num += actual_num;
6548         } while (num < total);
6549
6550 DONE:
6551         rte_free(req_list);
6552         return ret;
6553 }
6554
6555 int
6556 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6557                             struct i40e_macvlan_filter *filter,
6558                             int total)
6559 {
6560         int ele_num, ele_buff_size;
6561         int num, actual_num, i;
6562         uint16_t flags;
6563         int ret = I40E_SUCCESS;
6564         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6565         struct i40e_aqc_remove_macvlan_element_data *req_list;
6566
6567         if (filter == NULL || total == 0)
6568                 return I40E_ERR_PARAM;
6569
6570         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6571         ele_buff_size = hw->aq.asq_buf_size;
6572
6573         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6574         if (req_list == NULL) {
6575                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6576                 return I40E_ERR_NO_MEMORY;
6577         }
6578
6579         num = 0;
6580         do {
6581                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6582                 memset(req_list, 0, ele_buff_size);
6583
6584                 for (i = 0; i < actual_num; i++) {
6585                         rte_memcpy(req_list[i].mac_addr,
6586                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6587                         req_list[i].vlan_tag =
6588                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6589
6590                         switch (filter[num + i].filter_type) {
6591                         case RTE_MAC_PERFECT_MATCH:
6592                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6593                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6594                                 break;
6595                         case RTE_MACVLAN_PERFECT_MATCH:
6596                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6597                                 break;
6598                         case RTE_MAC_HASH_MATCH:
6599                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6600                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6601                                 break;
6602                         case RTE_MACVLAN_HASH_MATCH:
6603                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6604                                 break;
6605                         default:
6606                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6607                                 ret = I40E_ERR_PARAM;
6608                                 goto DONE;
6609                         }
6610                         req_list[i].flags = rte_cpu_to_le_16(flags);
6611                 }
6612
6613                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6614                                                 actual_num, NULL);
6615                 if (ret != I40E_SUCCESS) {
6616                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6617                         goto DONE;
6618                 }
6619                 num += actual_num;
6620         } while (num < total);
6621
6622 DONE:
6623         rte_free(req_list);
6624         return ret;
6625 }
6626
6627 /* Find a specific MAC filter */
6628 static struct i40e_mac_filter *
6629 i40e_find_mac_filter(struct i40e_vsi *vsi,
6630                          struct ether_addr *macaddr)
6631 {
6632         struct i40e_mac_filter *f;
6633
6634         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6635                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6636                         return f;
6637         }
6638
6639         return NULL;
6640 }
6641
6642 static bool
6643 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6644                          uint16_t vlan_id)
6645 {
6646         uint32_t vid_idx, vid_bit;
6647
6648         if (vlan_id > ETH_VLAN_ID_MAX)
6649                 return 0;
6650
6651         vid_idx = I40E_VFTA_IDX(vlan_id);
6652         vid_bit = I40E_VFTA_BIT(vlan_id);
6653
6654         if (vsi->vfta[vid_idx] & vid_bit)
6655                 return 1;
6656         else
6657                 return 0;
6658 }
6659
6660 static void
6661 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6662                        uint16_t vlan_id, bool on)
6663 {
6664         uint32_t vid_idx, vid_bit;
6665
6666         vid_idx = I40E_VFTA_IDX(vlan_id);
6667         vid_bit = I40E_VFTA_BIT(vlan_id);
6668
6669         if (on)
6670                 vsi->vfta[vid_idx] |= vid_bit;
6671         else
6672                 vsi->vfta[vid_idx] &= ~vid_bit;
6673 }
6674
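/*
 * Worked example of the VFTA bitmap math, assuming the usual
 * definitions in i40e_ethdev.h (index = vlan_id / 32,
 * bit = 1 << (vlan_id % 32)):
 *
 *     vlan_id = 100
 *     vid_idx = 100 / 32        = 3
 *     vid_bit = 1 << (100 % 32) = 1 << 4 = 0x10
 *
 * VLAN 100 is therefore bit 4 of vsi->vfta[3], so storing or testing
 * a filter is a single OR/AND on one 32-bit word.
 */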
6675 void
6676 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6677                      uint16_t vlan_id, bool on)
6678 {
6679         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6680         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6681         int ret;
6682
6683         if (vlan_id > ETH_VLAN_ID_MAX)
6684                 return;
6685
6686         i40e_store_vlan_filter(vsi, vlan_id, on);
6687
6688         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6689                 return;
6690
6691         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6692
6693         if (on) {
6694                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6695                                        &vlan_data, 1, NULL);
6696                 if (ret != I40E_SUCCESS)
6697                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6698         } else {
6699                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6700                                           &vlan_data, 1, NULL);
6701                 if (ret != I40E_SUCCESS)
6702                         PMD_DRV_LOG(ERR,
6703                                     "Failed to remove vlan filter");
6704         }
6705 }
6706
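/*
 * Minimal usage sketch: toggling a VLAN filter on a VSI. Note that
 * the admin-queue add/remove in i40e_set_vlan_filter() is only
 * issued when VLAN anti-spoofing or VLAN filtering is enabled and
 * vlan_id is non-zero; otherwise only the software VFTA bitmap is
 * updated.
 *
 *     i40e_set_vlan_filter(vsi, 100, 1);    // set VLAN 100
 *     i40e_set_vlan_filter(vsi, 100, 0);    // clear VLAN 100
 */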
6707 /**
6708  * Find all vlan options for a specific mac addr,
6709  * filling mv_f with the vlans actually found.
6710  */
6711 int
6712 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6713                            struct i40e_macvlan_filter *mv_f,
6714                            int num, struct ether_addr *addr)
6715 {
6716         int i;
6717         uint32_t j, k;
6718
6719         /**
6720          * Scan the VFTA bitmap directly instead of calling
6721          * i40e_find_vlan_filter() per VLAN; faster, though it looks complex.
6722          */
6723         if (num < vsi->vlan_num)
6724                 return I40E_ERR_PARAM;
6725
6726         i = 0;
6727         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6728                 if (vsi->vfta[j]) {
6729                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6730                                 if (vsi->vfta[j] & (1 << k)) {
6731                                         if (i > num - 1) {
6732                                                 PMD_DRV_LOG(ERR,
6733                                                         "vlan number doesn't match");
6734                                                 return I40E_ERR_PARAM;
6735                                         }
6736                                         rte_memcpy(&mv_f[i].macaddr,
6737                                                         addr, ETH_ADDR_LEN);
6738                                         mv_f[i].vlan_id =
6739                                                 j * I40E_UINT32_BIT_SIZE + k;
6740                                         i++;
6741                                 }
6742                         }
6743                 }
6744         }
6745         return I40E_SUCCESS;
6746 }
6747
6748 static inline int
6749 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6750                            struct i40e_macvlan_filter *mv_f,
6751                            int num,
6752                            uint16_t vlan)
6753 {
6754         int i = 0;
6755         struct i40e_mac_filter *f;
6756
6757         if (num < vsi->mac_num)
6758                 return I40E_ERR_PARAM;
6759
6760         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6761                 if (i > num - 1) {
6762                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
6763                         return I40E_ERR_PARAM;
6764                 }
6765                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6766                                 ETH_ADDR_LEN);
6767                 mv_f[i].vlan_id = vlan;
6768                 mv_f[i].filter_type = f->mac_info.filter_type;
6769                 i++;
6770         }
6771
6772         return I40E_SUCCESS;
6773 }
6774
6775 static int
6776 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6777 {
6778         int i, j, num;
6779         struct i40e_mac_filter *f;
6780         struct i40e_macvlan_filter *mv_f;
6781         int ret = I40E_SUCCESS;
6782
6783         if (vsi == NULL || vsi->mac_num == 0)
6784                 return I40E_ERR_PARAM;
6785
6786         /* Case that no vlan is set */
6787         if (vsi->vlan_num == 0)
6788                 num = vsi->mac_num;
6789         else
6790                 num = vsi->mac_num * vsi->vlan_num;
6791
6792         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6793         if (mv_f == NULL) {
6794                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6795                 return I40E_ERR_NO_MEMORY;
6796         }
6797
6798         i = 0;
6799         if (vsi->vlan_num == 0) {
6800                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6801                         rte_memcpy(&mv_f[i].macaddr,
6802                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6803                         mv_f[i].filter_type = f->mac_info.filter_type;
6804                         mv_f[i].vlan_id = 0;
6805                         i++;
6806                 }
6807         } else {
6808                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6809                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6810                                         vsi->vlan_num, &f->mac_info.mac_addr);
6811                         if (ret != I40E_SUCCESS)
6812                                 goto DONE;
6813                         for (j = i; j < i + vsi->vlan_num; j++)
6814                                 mv_f[j].filter_type = f->mac_info.filter_type;
6815                         i += vsi->vlan_num;
6816                 }
6817         }
6818
6819         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6820 DONE:
6821         rte_free(mv_f);
6822
6823         return ret;
6824 }
6825
6826 int
6827 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6828 {
6829         struct i40e_macvlan_filter *mv_f;
6830         int mac_num;
6831         int ret = I40E_SUCCESS;
6832
6833         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6834                 return I40E_ERR_PARAM;
6835
6836         /* If it's already set, just return */
6837         if (i40e_find_vlan_filter(vsi, vlan))
6838                 return I40E_SUCCESS;
6839
6840         mac_num = vsi->mac_num;
6841
6842         if (mac_num == 0) {
6843                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6844                 return I40E_ERR_PARAM;
6845         }
6846
6847         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6848
6849         if (mv_f == NULL) {
6850                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6851                 return I40E_ERR_NO_MEMORY;
6852         }
6853
6854         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6855
6856         if (ret != I40E_SUCCESS)
6857                 goto DONE;
6858
6859         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6860
6861         if (ret != I40E_SUCCESS)
6862                 goto DONE;
6863
6864         i40e_set_vlan_filter(vsi, vlan, 1);
6865
6866         vsi->vlan_num++;
6867         ret = I40E_SUCCESS;
6868 DONE:
6869         rte_free(mv_f);
6870         return ret;
6871 }
6872
6873 int
6874 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6875 {
6876         struct i40e_macvlan_filter *mv_f;
6877         int mac_num;
6878         int ret = I40E_SUCCESS;
6879
6880         /**
6881          * Vlan 0 is the generic filter for untagged packets
6882          * and can't be removed.
6883          */
6884         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6885                 return I40E_ERR_PARAM;
6886
6887         /* If it can't be found, just return */
6888         if (!i40e_find_vlan_filter(vsi, vlan))
6889                 return I40E_ERR_PARAM;
6890
6891         mac_num = vsi->mac_num;
6892
6893         if (mac_num == 0) {
6894                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6895                 return I40E_ERR_PARAM;
6896         }
6897
6898         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6899
6900         if (mv_f == NULL) {
6901                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6902                 return I40E_ERR_NO_MEMORY;
6903         }
6904
6905         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6906
6907         if (ret != I40E_SUCCESS)
6908                 goto DONE;
6909
6910         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6911
6912         if (ret != I40E_SUCCESS)
6913                 goto DONE;
6914
6915         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6916         if (vsi->vlan_num == 1) {
6917                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6918                 if (ret != I40E_SUCCESS)
6919                         goto DONE;
6920
6921                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6922                 if (ret != I40E_SUCCESS)
6923                         goto DONE;
6924         }
6925
6926         i40e_set_vlan_filter(vsi, vlan, 0);
6927
6928         vsi->vlan_num--;
6929         ret = I40E_SUCCESS;
6930 DONE:
6931         rte_free(mv_f);
6932         return ret;
6933 }
6934
6935 int
6936 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6937 {
6938         struct i40e_mac_filter *f;
6939         struct i40e_macvlan_filter *mv_f;
6940         int i, vlan_num = 0;
6941         int ret = I40E_SUCCESS;
6942
6943         /* If the MAC filter has already been configured, just return */
6944         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6945         if (f != NULL)
6946                 return I40E_SUCCESS;
6947         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6948                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6949
6950                 /**
6951                  * If vlan_num is 0, this is the first time a mac is added;
6952                  * set the mask for vlan_id 0.
6953                  */
6954                 if (vsi->vlan_num == 0) {
6955                         i40e_set_vlan_filter(vsi, 0, 1);
6956                         vsi->vlan_num = 1;
6957                 }
6958                 vlan_num = vsi->vlan_num;
6959         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6960                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6961                 vlan_num = 1;
6962
6963         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6964         if (mv_f == NULL) {
6965                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6966                 return I40E_ERR_NO_MEMORY;
6967         }
6968
6969         for (i = 0; i < vlan_num; i++) {
6970                 mv_f[i].filter_type = mac_filter->filter_type;
6971                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6972                                 ETH_ADDR_LEN);
6973         }
6974
6975         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6976                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6977                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6978                                         &mac_filter->mac_addr);
6979                 if (ret != I40E_SUCCESS)
6980                         goto DONE;
6981         }
6982
6983         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6984         if (ret != I40E_SUCCESS)
6985                 goto DONE;
6986
6987         /* Add the mac addr into mac list */
6988         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6989         if (f == NULL) {
6990                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6991                 ret = I40E_ERR_NO_MEMORY;
6992                 goto DONE;
6993         }
6994         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6995                         ETH_ADDR_LEN);
6996         f->mac_info.filter_type = mac_filter->filter_type;
6997         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6998         vsi->mac_num++;
6999
7000         ret = I40E_SUCCESS;
7001 DONE:
7002         rte_free(mv_f);
7003
7004         return ret;
7005 }
7006
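/*
 * A minimal sketch of adding a perfect-match MAC filter through this
 * helper (the address is a made-up locally administered MAC):
 *
 *     struct i40e_mac_filter_info mac_filter;
 *     struct ether_addr addr = {
 *             .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *     mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *     rte_memcpy(&mac_filter.mac_addr, &addr, ETH_ADDR_LEN);
 *     if (i40e_vsi_add_mac(vsi, &mac_filter) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "add mac failed");
 *
 * With the RTE_MACVLAN_* types the filter is expanded to one AQ
 * entry per VLAN currently set in the VFTA (VLAN 0 when none is).
 */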
7007 int
7008 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
7009 {
7010         struct i40e_mac_filter *f;
7011         struct i40e_macvlan_filter *mv_f;
7012         int i, vlan_num;
7013         enum rte_mac_filter_type filter_type;
7014         int ret = I40E_SUCCESS;
7015
7016         /* Can't find it, return an error */
7017         f = i40e_find_mac_filter(vsi, addr);
7018         if (f == NULL)
7019                 return I40E_ERR_PARAM;
7020
7021         vlan_num = vsi->vlan_num;
7022         filter_type = f->mac_info.filter_type;
7023         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7024                 filter_type == RTE_MACVLAN_HASH_MATCH) {
7025                 if (vlan_num == 0) {
7026                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7027                         return I40E_ERR_PARAM;
7028                 }
7029         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7030                         filter_type == RTE_MAC_HASH_MATCH)
7031                 vlan_num = 1;
7032
7033         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7034         if (mv_f == NULL) {
7035                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7036                 return I40E_ERR_NO_MEMORY;
7037         }
7038
7039         for (i = 0; i < vlan_num; i++) {
7040                 mv_f[i].filter_type = filter_type;
7041                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7042                                 ETH_ADDR_LEN);
7043         }
7044         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7045                         filter_type == RTE_MACVLAN_HASH_MATCH) {
7046                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7047                 if (ret != I40E_SUCCESS)
7048                         goto DONE;
7049         }
7050
7051         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7052         if (ret != I40E_SUCCESS)
7053                 goto DONE;
7054
7055         /* Remove the mac addr from the mac list */
7056         TAILQ_REMOVE(&vsi->mac_list, f, next);
7057         rte_free(f);
7058         vsi->mac_num--;
7059
7060         ret = I40E_SUCCESS;
7061 DONE:
7062         rte_free(mv_f);
7063         return ret;
7064 }
7065
7066 /* Configure hash enable flags for RSS */
7067 uint64_t
7068 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7069 {
7070         uint64_t hena = 0;
7071         int i;
7072
7073         if (!flags)
7074                 return hena;
7075
7076         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7077                 if (flags & (1ULL << i))
7078                         hena |= adapter->pctypes_tbl[i];
7079         }
7080
7081         return hena;
7082 }
7083
7084 /* Parse the hash enable flags */
7085 uint64_t
7086 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7087 {
7088         uint64_t rss_hf = 0;
7089         int i;
7090
7091         if (!flags)
7092                 return rss_hf;
7093
7094         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7095                 if (flags & adapter->pctypes_tbl[i])
7096                         rss_hf |= (1ULL << i);
7097         }
7098         return rss_hf;
7099 }
7100
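/*
 * i40e_config_hena() and i40e_parse_hena() are intended as inverses:
 * bit i of the rss_hf flow-type flags maps to the hardware
 * packet-classifier mask adapter->pctypes_tbl[i], and back. A sketch
 * of the round trip (assuming the per-flow-type pctype masks do not
 * overlap in the table built at init time):
 *
 *     uint64_t hena    = i40e_config_hena(adapter, rss_hf);
 *     uint64_t rss_hf2 = i40e_parse_hena(adapter, hena);
 *     // rss_hf2 == (rss_hf & adapter->flow_types_mask)
 */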
7101 /* Disable RSS */
7102 static void
7103 i40e_pf_disable_rss(struct i40e_pf *pf)
7104 {
7105         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7106
7107         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7108         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7109         I40E_WRITE_FLUSH(hw);
7110 }
7111
7112 int
7113 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7114 {
7115         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7116         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7117         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7118                            I40E_VFQF_HKEY_MAX_INDEX :
7119                            I40E_PFQF_HKEY_MAX_INDEX;
7120         int ret = 0;
7121
7122         if (!key || key_len == 0) {
7123                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7124                 return 0;
7125         } else if (key_len != (key_idx + 1) *
7126                 sizeof(uint32_t)) {
7127                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7128                 return -EINVAL;
7129         }
7130
7131         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7132                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7133                         (struct i40e_aqc_get_set_rss_key_data *)key;
7134
7135                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7136                 if (ret)
7137                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7138         } else {
7139                 uint32_t *hash_key = (uint32_t *)key;
7140                 uint16_t i;
7141
7142                 if (vsi->type == I40E_VSI_SRIOV) {
7143                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7144                                 I40E_WRITE_REG(
7145                                         hw,
7146                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7147                                         hash_key[i]);
7148
7149                 } else {
7150                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7151                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7152                                                hash_key[i]);
7153                 }
7154                 I40E_WRITE_FLUSH(hw);
7155         }
7156
7157         return ret;
7158 }
7159
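/*
 * Applications normally supply the key through the ethdev API rather
 * than calling this helper directly. A hedged sketch (key bytes and
 * port_id are placeholders; for a PF the expected length is
 * (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) = 52 bytes):
 *
 *     uint8_t key[52] = { 0x6d };   // fill all 52 bytes in practice
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = key,
 *             .rss_key_len = sizeof(key),
 *             .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *     };
 *
 *     rte_eth_dev_rss_hash_update(port_id, &conf);
 */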
7160 static int
7161 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7162 {
7163         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7164         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7165         uint32_t reg;
7166         int ret;
7167
7168         if (!key || !key_len)
7169                 return -EINVAL;
7170
7171         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7172                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7173                         (struct i40e_aqc_get_set_rss_key_data *)key);
7174                 if (ret) {
7175                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7176                         return ret;
7177                 }
7178         } else {
7179                 uint32_t *key_dw = (uint32_t *)key;
7180                 uint16_t i;
7181
7182                 if (vsi->type == I40E_VSI_SRIOV) {
7183                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7184                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7185                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7186                         }
7187                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7188                                    sizeof(uint32_t);
7189                 } else {
7190                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7191                                 reg = I40E_PFQF_HKEY(i);
7192                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7193                         }
7194                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7195                                    sizeof(uint32_t);
7196                 }
7197         }
7198         return 0;
7199 }
7200
7201 static int
7202 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7203 {
7204         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7205         uint64_t hena;
7206         int ret;
7207
7208         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7209                                rss_conf->rss_key_len);
7210         if (ret)
7211                 return ret;
7212
7213         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7214         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7215         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7216         I40E_WRITE_FLUSH(hw);
7217
7218         return 0;
7219 }
7220
7221 static int
7222 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7223                          struct rte_eth_rss_conf *rss_conf)
7224 {
7225         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7226         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7227         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7228         uint64_t hena;
7229
7230         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7231         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7232
7233         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7234                 if (rss_hf != 0) /* Enable RSS */
7235                         return -EINVAL;
7236                 return 0; /* Nothing to do */
7237         }
7238         /* RSS enabled */
7239         if (rss_hf == 0) /* Disable RSS */
7240                 return -EINVAL;
7241
7242         return i40e_hw_rss_hash_set(pf, rss_conf);
7243 }
7244
7245 static int
7246 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7247                            struct rte_eth_rss_conf *rss_conf)
7248 {
7249         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7250         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7251         uint64_t hena;
7252
7253         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7254                          &rss_conf->rss_key_len);
7255
7256         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7257         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7258         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7259
7260         return 0;
7261 }
7262
7263 static int
7264 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7265 {
7266         switch (filter_type) {
7267         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7268                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7269                 break;
7270         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7271                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7272                 break;
7273         case RTE_TUNNEL_FILTER_IMAC_TENID:
7274                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7275                 break;
7276         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7277                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7278                 break;
7279         case ETH_TUNNEL_FILTER_IMAC:
7280                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7281                 break;
7282         case ETH_TUNNEL_FILTER_OIP:
7283                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7284                 break;
7285         case ETH_TUNNEL_FILTER_IIP:
7286                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7287                 break;
7288         default:
7289                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7290                 return -EINVAL;
7291         }
7292
7293         return 0;
7294 }
7295
7296 /* Convert tunnel filter structure */
7297 static int
7298 i40e_tunnel_filter_convert(
7299         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7300         struct i40e_tunnel_filter *tunnel_filter)
7301 {
7302         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7303                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
7304         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7305                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
7306         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7307         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7308              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7309             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7310                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7311         else
7312                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7313         tunnel_filter->input.flags = cld_filter->element.flags;
7314         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7315         tunnel_filter->queue = cld_filter->element.queue_number;
7316         rte_memcpy(tunnel_filter->input.general_fields,
7317                    cld_filter->general_fields,
7318                    sizeof(cld_filter->general_fields));
7319
7320         return 0;
7321 }
7322
7323 /* Check whether the tunnel filter exists */
7324 struct i40e_tunnel_filter *
7325 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7326                              const struct i40e_tunnel_filter_input *input)
7327 {
7328         int ret;
7329
7330         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7331         if (ret < 0)
7332                 return NULL;
7333
7334         return tunnel_rule->hash_map[ret];
7335 }
7336
7337 /* Add a tunnel filter into the SW list */
7338 static int
7339 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7340                              struct i40e_tunnel_filter *tunnel_filter)
7341 {
7342         struct i40e_tunnel_rule *rule = &pf->tunnel;
7343         int ret;
7344
7345         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7346         if (ret < 0) {
7347                 PMD_DRV_LOG(ERR,
7348                             "Failed to insert tunnel filter into hash table %d!",
7349                             ret);
7350                 return ret;
7351         }
7352         rule->hash_map[ret] = tunnel_filter;
7353
7354         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7355
7356         return 0;
7357 }
7358
7359 /* Delete a tunnel filter from the SW list */
7360 int
7361 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7362                           struct i40e_tunnel_filter_input *input)
7363 {
7364         struct i40e_tunnel_rule *rule = &pf->tunnel;
7365         struct i40e_tunnel_filter *tunnel_filter;
7366         int ret;
7367
7368         ret = rte_hash_del_key(rule->hash_table, input);
7369         if (ret < 0) {
7370                 PMD_DRV_LOG(ERR,
7371                             "Failed to delete tunnel filter from hash table %d!",
7372                             ret);
7373                 return ret;
7374         }
7375         tunnel_filter = rule->hash_map[ret];
7376         rule->hash_map[ret] = NULL;
7377
7378         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7379         rte_free(tunnel_filter);
7380
7381         return 0;
7382 }
7383
7384 int
7385 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7386                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7387                         uint8_t add)
7388 {
7389         uint16_t ip_type;
7390         uint32_t ipv4_addr, ipv4_addr_le;
7391         uint8_t i, tun_type = 0;
7392         /* internal variable to convert ipv6 byte order */
7393         uint32_t convert_ipv6[4];
7394         int val, ret = 0;
7395         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7396         struct i40e_vsi *vsi = pf->main_vsi;
7397         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7398         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7399         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7400         struct i40e_tunnel_filter *tunnel, *node;
7401         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7402
7403         cld_filter = rte_zmalloc("tunnel_filter",
7404                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7405                          0);
7406
7407         if (cld_filter == NULL) {
7408                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7409                 return -ENOMEM;
7410         }
7411         pfilter = cld_filter;
7412
7413         ether_addr_copy(&tunnel_filter->outer_mac,
7414                         (struct ether_addr *)&pfilter->element.outer_mac);
7415         ether_addr_copy(&tunnel_filter->inner_mac,
7416                         (struct ether_addr *)&pfilter->element.inner_mac);
7417
7418         pfilter->element.inner_vlan =
7419                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7420         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7421                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7422                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7423                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7424                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7425                                 &ipv4_addr_le,
7426                                 sizeof(pfilter->element.ipaddr.v4.data));
7427         } else {
7428                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7429                 for (i = 0; i < 4; i++) {
7430                         convert_ipv6[i] =
7431                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7432                 }
7433                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7434                            &convert_ipv6,
7435                            sizeof(pfilter->element.ipaddr.v6.data));
7436         }
7437
7438         /* check the tunnel type */
7439         switch (tunnel_filter->tunnel_type) {
7440         case RTE_TUNNEL_TYPE_VXLAN:
7441                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7442                 break;
7443         case RTE_TUNNEL_TYPE_NVGRE:
7444                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7445                 break;
7446         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7447                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7448                 break;
7449         default:
7450                 /* Other tunnel types are not supported. */
7451                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7452                 rte_free(cld_filter);
7453                 return -EINVAL;
7454         }
7455
7456         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7457                                        &pfilter->element.flags);
7458         if (val < 0) {
7459                 rte_free(cld_filter);
7460                 return -EINVAL;
7461         }
7462
7463         pfilter->element.flags |= rte_cpu_to_le_16(
7464                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7465                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7466         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7467         pfilter->element.queue_number =
7468                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7469
7470         /* Check whether the filter exists in the SW list */
7471         memset(&check_filter, 0, sizeof(check_filter));
7472         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7473         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7474         if (add && node) {
7475                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7476                 rte_free(cld_filter);
7477                 return -EINVAL;
7478         }
7479
7480         if (!add && !node) {
7481                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7482                 rte_free(cld_filter);
7483                 return -EINVAL;
7484         }
7485
7486         if (add) {
7487                 ret = i40e_aq_add_cloud_filters(hw,
7488                                         vsi->seid, &cld_filter->element, 1);
7489                 if (ret < 0) {
7490                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7491                         rte_free(cld_filter);
7492                         return -ENOTSUP;
7493                 }
7494                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7495                 if (tunnel == NULL) {
7496                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7497                         rte_free(cld_filter);
7498                         return -ENOMEM;
7499                 }
7500
7501                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7502                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7503                 if (ret < 0)
7504                         rte_free(tunnel);
7505         } else {
7506                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7507                                                    &cld_filter->element, 1);
7508                 if (ret < 0) {
7509                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7510                         rte_free(cld_filter);
7511                         return -ENOTSUP;
7512                 }
7513                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7514         }
7515
7516         rte_free(cld_filter);
7517         return ret;
7518 }
7519
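/*
 * A minimal sketch of steering a VXLAN tenant to a queue through
 * this path (tenant_id, queue_id and the inner MAC are hypothetical;
 * the struct is the legacy filter-ctrl tunnel configuration from
 * rte_eth_ctrl.h):
 *
 *     struct rte_eth_tunnel_filter_conf conf;
 *     struct ether_addr imac = {
 *             .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x02} };
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *     conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID;
 *     conf.ip_type = RTE_TUNNEL_IPTYPE_IPV4;
 *     ether_addr_copy(&imac, &conf.inner_mac);
 *     conf.tenant_id = 1000;        // VNI to match
 *     conf.queue_id = 4;            // Rx queue for hits
 *
 *     ret = i40e_dev_tunnel_filter_set(pf, &conf, 1);   // 1 == add
 */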
7520 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7521 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7522 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7523 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7524 #define I40E_TR_GRE_KEY_MASK                    0x400
7525 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7526 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7527
7528 static enum i40e_status_code
7529 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7530 {
7531         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7532         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7533         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7534         enum i40e_status_code status = I40E_SUCCESS;
7535
7536         if (pf->support_multi_driver) {
7537                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7538                 return I40E_NOT_SUPPORTED;
7539         }
7540
7541         memset(&filter_replace, 0,
7542                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7543         memset(&filter_replace_buf, 0,
7544                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7545
7546         /* create L1 filter */
7547         filter_replace.old_filter_type =
7548                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7549         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7550         filter_replace.tr_bit = 0;
7551
7552         /* Prepare the buffer, 3 entries */
7553         filter_replace_buf.data[0] =
7554                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7555         filter_replace_buf.data[0] |=
7556                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7557         filter_replace_buf.data[2] = 0xFF;
7558         filter_replace_buf.data[3] = 0xFF;
7559         filter_replace_buf.data[4] =
7560                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7561         filter_replace_buf.data[4] |=
7562                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7563         filter_replace_buf.data[7] = 0xF0;
7564         filter_replace_buf.data[8] =
7565                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7566         filter_replace_buf.data[8] |=
7567                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7568         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7569                 I40E_TR_GENEVE_KEY_MASK |
7570                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7571         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7572                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7573                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7574
7575         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7576                                                &filter_replace_buf);
7577         if (!status) {
7578                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7579                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7580                             "cloud l1 type is changed from 0x%x to 0x%x",
7581                             filter_replace.old_filter_type,
7582                             filter_replace.new_filter_type);
7583         }
7584         return status;
7585 }
7586
7587 static enum i40e_status_code
7588 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7589 {
7590         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7591         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7592         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7593         enum i40e_status_code status = I40E_SUCCESS;
7594
7595         if (pf->support_multi_driver) {
7596                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7597                 return I40E_NOT_SUPPORTED;
7598         }
7599
7600         /* For MPLSoUDP */
7601         memset(&filter_replace, 0,
7602                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7603         memset(&filter_replace_buf, 0,
7604                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7605         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7606                 I40E_AQC_MIRROR_CLOUD_FILTER;
7607         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7608         filter_replace.new_filter_type =
7609                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7610         /* Prepare the buffer, 2 entries */
7611         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7612         filter_replace_buf.data[0] |=
7613                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7614         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7615         filter_replace_buf.data[4] |=
7616                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7617         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7618                                                &filter_replace_buf);
7619         if (status < 0)
7620                 return status;
7621         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7622                     "cloud filter type is changed from 0x%x to 0x%x",
7623                     filter_replace.old_filter_type,
7624                     filter_replace.new_filter_type);
7625
7626         /* For MPLSoGRE */
7627         memset(&filter_replace, 0,
7628                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7629         memset(&filter_replace_buf, 0,
7630                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7631
7632         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7633                 I40E_AQC_MIRROR_CLOUD_FILTER;
7634         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7635         filter_replace.new_filter_type =
7636                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7637         /* Prepare the buffer, 2 entries */
7638         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7639         filter_replace_buf.data[0] |=
7640                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7641         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7642         filter_replace_buf.data[4] |=
7643                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7644
7645         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7646                                                &filter_replace_buf);
7647         if (!status) {
7648                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7649                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7650                             "cloud filter type is changed from 0x%x to 0x%x",
7651                             filter_replace.old_filter_type,
7652                             filter_replace.new_filter_type);
7653         }
7654         return status;
7655 }
7656
7657 static enum i40e_status_code
7658 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7659 {
7660         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7661         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7662         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7663         enum i40e_status_code status = I40E_SUCCESS;
7664
7665         if (pf->support_multi_driver) {
7666                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7667                 return I40E_NOT_SUPPORTED;
7668         }
7669
7670         /* For GTP-C */
7671         memset(&filter_replace, 0,
7672                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7673         memset(&filter_replace_buf, 0,
7674                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7675         /* create L1 filter */
7676         filter_replace.old_filter_type =
7677                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7678         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7679         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7680                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7681         /* Prepare the buffer, 2 entries */
7682         filter_replace_buf.data[0] =
7683                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7684         filter_replace_buf.data[0] |=
7685                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7686         filter_replace_buf.data[2] = 0xFF;
7687         filter_replace_buf.data[3] = 0xFF;
7688         filter_replace_buf.data[4] =
7689                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7690         filter_replace_buf.data[4] |=
7691                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7692         filter_replace_buf.data[6] = 0xFF;
7693         filter_replace_buf.data[7] = 0xFF;
7694         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7695                                                &filter_replace_buf);
7696         if (status < 0)
7697                 return status;
7698         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7699                     "cloud l1 type is changed from 0x%x to 0x%x",
7700                     filter_replace.old_filter_type,
7701                     filter_replace.new_filter_type);
7702
7703         /* for GTP-U */
7704         memset(&filter_replace, 0,
7705                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7706         memset(&filter_replace_buf, 0,
7707                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7708         /* create L1 filter */
7709         filter_replace.old_filter_type =
7710                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7711         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7712         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7713                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7714         /* Prepare the buffer, 2 entries */
7715         filter_replace_buf.data[0] =
7716                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7717         filter_replace_buf.data[0] |=
7718                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7719         filter_replace_buf.data[2] = 0xFF;
7720         filter_replace_buf.data[3] = 0xFF;
7721         filter_replace_buf.data[4] =
7722                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7723         filter_replace_buf.data[4] |=
7724                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7725         filter_replace_buf.data[6] = 0xFF;
7726         filter_replace_buf.data[7] = 0xFF;
7727
7728         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7729                                                &filter_replace_buf);
7730         if (!status) {
7731                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7732                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7733                             "cloud l1 type is changed from 0x%x to 0x%x",
7734                             filter_replace.old_filter_type,
7735                             filter_replace.new_filter_type);
7736         }
7737         return status;
7738 }
7739
7740 static enum i40e_status_code
7741 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7742 {
7743         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7744         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7745         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7746         enum i40e_status_code status = I40E_SUCCESS;
7747
7748         if (pf->support_multi_driver) {
7749                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7750                 return I40E_NOT_SUPPORTED;
7751         }
7752
7753         /* for GTP-C */
7754         memset(&filter_replace, 0,
7755                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7756         memset(&filter_replace_buf, 0,
7757                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7758         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7759         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7760         filter_replace.new_filter_type =
7761                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7762         /* Prepare the buffer, 2 entries */
7763         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7764         filter_replace_buf.data[0] |=
7765                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7766         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7767         filter_replace_buf.data[4] |=
7768                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7769         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7770                                                &filter_replace_buf);
7771         if (status < 0)
7772                 return status;
7773         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7774                     "cloud filter type is changed from 0x%x to 0x%x",
7775                     filter_replace.old_filter_type,
7776                     filter_replace.new_filter_type);
7777
7778         /* for GTP-U */
7779         memset(&filter_replace, 0,
7780                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7781         memset(&filter_replace_buf, 0,
7782                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7783         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7784         filter_replace.old_filter_type =
7785                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7786         filter_replace.new_filter_type =
7787                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7788         /* Prepare the buffer, 2 entries */
7789         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7790         filter_replace_buf.data[0] |=
7791                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7792         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7793         filter_replace_buf.data[4] |=
7794                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7795
7796         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7797                                                &filter_replace_buf);
7798         if (!status) {
7799                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7800                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7801                             "cloud filter type is changed from 0x%x to 0x%x",
7802                             filter_replace.old_filter_type,
7803                             filter_replace.new_filter_type);
7804         }
7805         return status;
7806 }
7807
7808 int
7809 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7810                       struct i40e_tunnel_filter_conf *tunnel_filter,
7811                       uint8_t add)
7812 {
7813         uint16_t ip_type;
7814         uint32_t ipv4_addr, ipv4_addr_le;
7815         uint8_t i, tun_type = 0;
7816         /* internal variable to convert ipv6 byte order */
7817         uint32_t convert_ipv6[4];
7818         int val, ret = 0;
7819         struct i40e_pf_vf *vf = NULL;
7820         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7821         struct i40e_vsi *vsi;
7822         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7823         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7824         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7825         struct i40e_tunnel_filter *tunnel, *node;
7826         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7827         uint32_t teid_le;
7828         bool big_buffer = 0;
7829
7830         cld_filter = rte_zmalloc("tunnel_filter",
7831                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7832                          0);
7833
7834         if (cld_filter == NULL) {
7835                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7836                 return -ENOMEM;
7837         }
7838         pfilter = cld_filter;
7839
7840         ether_addr_copy(&tunnel_filter->outer_mac,
7841                         (struct ether_addr *)&pfilter->element.outer_mac);
7842         ether_addr_copy(&tunnel_filter->inner_mac,
7843                         (struct ether_addr *)&pfilter->element.inner_mac);
7844
7845         pfilter->element.inner_vlan =
7846                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7847         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7848                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7849                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7850                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7851                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7852                                 &ipv4_addr_le,
7853                                 sizeof(pfilter->element.ipaddr.v4.data));
7854         } else {
7855                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7856                 for (i = 0; i < 4; i++) {
7857                         convert_ipv6[i] =
7858                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7859                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7860                 }
7861                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7862                            &convert_ipv6,
7863                            sizeof(pfilter->element.ipaddr.v6.data));
7864         }
7865
7866         /* Check the tunnel type */
7867         switch (tunnel_filter->tunnel_type) {
7868         case I40E_TUNNEL_TYPE_VXLAN:
7869                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7870                 break;
7871         case I40E_TUNNEL_TYPE_NVGRE:
7872                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7873                 break;
7874         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7875                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7876                 break;
7877         case I40E_TUNNEL_TYPE_MPLSoUDP:
7878                 if (!pf->mpls_replace_flag) {
7879                         i40e_replace_mpls_l1_filter(pf);
7880                         i40e_replace_mpls_cloud_filter(pf);
7881                         pf->mpls_replace_flag = 1;
7882                 }
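                     /* The MPLS label is carried in the tenant_id; split it
                      * across two 16-bit field-vector words: the upper bits
                      * go into WORD0 and the lowest four bits into the top
                      * nibble of WORD1. WORD2 differs between the UDP (0x40)
                      * and GRE (0x0) encapsulations.
                      */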
7883                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7884                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7885                         teid_le >> 4;
7886                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7887                         (teid_le & 0xF) << 12;
7888                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7889                         0x40;
7890                 big_buffer = 1;
7891                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7892                 break;
7893         case I40E_TUNNEL_TYPE_MPLSoGRE:
7894                 if (!pf->mpls_replace_flag) {
7895                         i40e_replace_mpls_l1_filter(pf);
7896                         i40e_replace_mpls_cloud_filter(pf);
7897                         pf->mpls_replace_flag = 1;
7898                 }
7899                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7900                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7901                         teid_le >> 4;
7902                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7903                         (teid_le & 0xF) << 12;
7904                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7905                         0x0;
7906                 big_buffer = 1;
7907                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7908                 break;
7909         case I40E_TUNNEL_TYPE_GTPC:
7910                 if (!pf->gtp_replace_flag) {
7911                         i40e_replace_gtp_l1_filter(pf);
7912                         i40e_replace_gtp_cloud_filter(pf);
7913                         pf->gtp_replace_flag = 1;
7914                 }
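                     /* The 32-bit GTP TEID is split across two 16-bit
                      * field-vector words: the high half into WORD0, the
                      * low half into WORD1.
                      */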
7915                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7916                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7917                         (teid_le >> 16) & 0xFFFF;
7918                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7919                         teid_le & 0xFFFF;
7920                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7921                         0x0;
7922                 big_buffer = 1;
7923                 break;
7924         case I40E_TUNNEL_TYPE_GTPU:
7925                 if (!pf->gtp_replace_flag) {
7926                         i40e_replace_gtp_l1_filter(pf);
7927                         i40e_replace_gtp_cloud_filter(pf);
7928                         pf->gtp_replace_flag = 1;
7929                 }
7930                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7931                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7932                         (teid_le >> 16) & 0xFFFF;
7933                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7934                         teid_le & 0xFFFF;
7935                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7936                         0x0;
7937                 big_buffer = 1;
7938                 break;
7939         case I40E_TUNNEL_TYPE_QINQ:
7940                 if (!pf->qinq_replace_flag) {
7941                         ret = i40e_cloud_filter_qinq_create(pf);
7942                         if (ret < 0)
7943                                 PMD_DRV_LOG(DEBUG,
7944                                             "QinQ tunnel filter already created.");
7945                         pf->qinq_replace_flag = 1;
7946                 }
7947         /*      Put the values of the outer and inner VLAN
7948          *      into the general fields.
7949          *      The big-buffer flag must be set; see the handling
7950          *      in i40e_aq_add_cloud_filters.
7951                  */
7952                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7953                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7954                 big_buffer = 1;
7955                 break;
7956         default:
7957                 /* Other tunnel types are not supported. */
7958                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
7959                 rte_free(cld_filter);
7960                 return -EINVAL;
7961         }
7962
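             /* Select the customized cloud filter type matching the L1 and
              * cloud filters programmed by the replace commands above: 0x11
              * for MPLSoUDP and GTPC, 0x12 for MPLSoGRE and GTPU, 0x10 for
              * QinQ. Other tunnel types use the standard filter-type lookup.
              */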
7963         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7964                 pfilter->element.flags =
7965                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7966         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7967                 pfilter->element.flags =
7968                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7969         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7970                 pfilter->element.flags =
7971                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7972         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7973                 pfilter->element.flags =
7974                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7975         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7976                 pfilter->element.flags |=
7977                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7978         else {
7979                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7980                                                 &pfilter->element.flags);
7981                 if (val < 0) {
7982                         rte_free(cld_filter);
7983                         return -EINVAL;
7984                 }
7985         }
7986
7987         pfilter->element.flags |= rte_cpu_to_le_16(
7988                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7989                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7990         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7991         pfilter->element.queue_number =
7992                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7993
7994         if (!tunnel_filter->is_to_vf)
7995                 vsi = pf->main_vsi;
7996         else {
7997                 if (tunnel_filter->vf_id >= pf->vf_num) {
7998                         PMD_DRV_LOG(ERR, "Invalid argument.");
7999                         rte_free(cld_filter);
8000                         return -EINVAL;
8001                 }
8002                 vf = &pf->vfs[tunnel_filter->vf_id];
8003                 vsi = vf->vsi;
8004         }
8005
8006         /* Check if the filter already exists in the SW list */
8007         memset(&check_filter, 0, sizeof(check_filter));
8008         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8009         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8010         check_filter.vf_id = tunnel_filter->vf_id;
8011         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8012         if (add && node) {
8013                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8014                 rte_free(cld_filter);
8015                 return -EINVAL;
8016         }
8017
8018         if (!add && !node) {
8019                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8020                 rte_free(cld_filter);
8021                 return -EINVAL;
8022         }
8023
8024         if (add) {
8025                 if (big_buffer)
8026                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
8027                                                    vsi->seid, cld_filter, 1);
8028                 else
8029                         ret = i40e_aq_add_cloud_filters(hw,
8030                                         vsi->seid, &cld_filter->element, 1);
8031                 if (ret < 0) {
8032                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8033                         rte_free(cld_filter);
8034                         return -ENOTSUP;
8035                 }
8036                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8037                 if (tunnel == NULL) {
8038                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8039                         rte_free(cld_filter);
8040                         return -ENOMEM;
8041                 }
8042
8043                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8044                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8045                 if (ret < 0)
8046                         rte_free(tunnel);
8047         } else {
8048                 if (big_buffer)
8049                         ret = i40e_aq_remove_cloud_filters_big_buffer(
8050                                 hw, vsi->seid, cld_filter, 1);
8051                 else
8052                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
8053                                                    &cld_filter->element, 1);
8054                 if (ret < 0) {
8055                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8056                         rte_free(cld_filter);
8057                         return -ENOTSUP;
8058                 }
8059                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8060         }
8061
8062         rte_free(cld_filter);
8063         return ret;
8064 }
8065
8066 static int
8067 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8068 {
8069         uint8_t i;
8070
8071         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8072                 if (pf->vxlan_ports[i] == port)
8073                         return i;
8074         }
8075
8076         return -1;
8077 }
8078
8079 static int
8080 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
8081 {
8082         int  idx, ret;
8083         uint8_t filter_idx;
8084         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8085
8086         idx = i40e_get_vxlan_port_idx(pf, port);
8087
8088         /* Check if port already exists */
8089         if (idx >= 0) {
8090                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8091                 return -EINVAL;
8092         }
8093
8094         /* Now check if there is space to add the new port */
8095         idx = i40e_get_vxlan_port_idx(pf, 0);
8096         if (idx < 0) {
8097                 PMD_DRV_LOG(ERR,
8098                         "Maximum number of UDP ports reached, not adding port %d",
8099                         port);
8100                 return -ENOSPC;
8101         }
8102
8103         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
8104                                         &filter_idx, NULL);
8105         if (ret < 0) {
8106                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8107                 return -1;
8108         }
8109
8110         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
8111                          port, filter_idx);
8112
8113         /* New port: add it and mark its index in the bitmap */
8114         pf->vxlan_ports[idx] = port;
8115         pf->vxlan_bitmap |= (1 << idx);
8116
8117         if (!(pf->flags & I40E_FLAG_VXLAN))
8118                 pf->flags |= I40E_FLAG_VXLAN;
8119
8120         return 0;
8121 }
8122
8123 static int
8124 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8125 {
8126         int idx;
8127         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8128
8129         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8130                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8131                 return -EINVAL;
8132         }
8133
8134         idx = i40e_get_vxlan_port_idx(pf, port);
8135
8136         if (idx < 0) {
8137                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8138                 return -EINVAL;
8139         }
8140
8141         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8142                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8143                 return -1;
8144         }
8145
8146         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
8147                         port, idx);
8148
8149         pf->vxlan_ports[idx] = 0;
8150         pf->vxlan_bitmap &= ~(1 << idx);
8151
8152         if (!pf->vxlan_bitmap)
8153                 pf->flags &= ~I40E_FLAG_VXLAN;
8154
8155         return 0;
8156 }
8157
8158 /* Add UDP tunneling port */
8159 static int
8160 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8161                              struct rte_eth_udp_tunnel *udp_tunnel)
8162 {
8163         int ret = 0;
8164         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8165
8166         if (udp_tunnel == NULL)
8167                 return -EINVAL;
8168
8169         switch (udp_tunnel->prot_type) {
8170         case RTE_TUNNEL_TYPE_VXLAN:
8171                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8172                 break;
8173
8174         case RTE_TUNNEL_TYPE_GENEVE:
8175         case RTE_TUNNEL_TYPE_TEREDO:
8176                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8177                 ret = -1;
8178                 break;
8179
8180         default:
8181                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8182                 ret = -1;
8183                 break;
8184         }
8185
8186         return ret;
8187 }
8188
8189 /* Remove UDP tunneling port */
8190 static int
8191 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8192                              struct rte_eth_udp_tunnel *udp_tunnel)
8193 {
8194         int ret = 0;
8195         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8196
8197         if (udp_tunnel == NULL)
8198                 return -EINVAL;
8199
8200         switch (udp_tunnel->prot_type) {
8201         case RTE_TUNNEL_TYPE_VXLAN:
8202                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8203                 break;
8204         case RTE_TUNNEL_TYPE_GENEVE:
8205         case RTE_TUNNEL_TYPE_TEREDO:
8206                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8207                 ret = -1;
8208                 break;
8209         default:
8210                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8211                 ret = -1;
8212                 break;
8213         }
8214
8215         return ret;
8216 }
8217
8218 /* Calculate the maximum number of contiguous PF queues that are configured */
8219 static int
8220 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8221 {
8222         struct rte_eth_dev_data *data = pf->dev_data;
8223         int i, num;
8224         struct i40e_rx_queue *rxq;
8225
8226         num = 0;
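             /* Count from queue 0 and stop at the first queue that has not
              * been set up, since only a contiguous range starting at queue 0
              * is usable for RSS.
              */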
8227         for (i = 0; i < pf->lan_nb_qps; i++) {
8228                 rxq = data->rx_queues[i];
8229                 if (rxq && rxq->q_set)
8230                         num++;
8231                 else
8232                         break;
8233         }
8234
8235         return num;
8236 }
8237
8238 /* Configure RSS */
8239 static int
8240 i40e_pf_config_rss(struct i40e_pf *pf)
8241 {
8242         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8243         struct rte_eth_rss_conf rss_conf;
8244         uint32_t i, lut = 0;
8245         uint16_t j, num;
8246
8247         /*
8248          * If both VMDQ and RSS are enabled, not all of the PF queues are
8249          * configured, so calculate the number that actually are.
8250          */
8251         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8252                 num = i40e_pf_calc_configured_queues_num(pf);
8253         else
8254                 num = pf->dev_data->nb_rx_queues;
8255
8256         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8257         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
8258                         num);
8259
8260         if (num == 0) {
8261                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8262                 return -ENOTSUP;
8263         }
8264
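             /* Fill the RSS lookup table with the configured queues in a
              * round-robin fashion. Each 32-bit HLUT register packs four
              * 8-bit entries, so accumulate four entries in 'lut' and write
              * the register on every fourth iteration.
              */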
8265         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8266                 if (j == num)
8267                         j = 0;
8268                 lut = (lut << 8) | (j & ((0x1 <<
8269                         hw->func_caps.rss_table_entry_width) - 1));
8270                 if ((i & 3) == 3)
8271                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8272         }
8273
8274         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8275         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8276                 i40e_pf_disable_rss(pf);
8277                 return 0;
8278         }
8279         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8280                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8281                 /* Randomly generated default hash key */
8282                 static uint32_t rss_key_default[] = {0x6b793944,
8283                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8284                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8285                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8286
8287                 rss_conf.rss_key = (uint8_t *)rss_key_default;
8288                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8289                                                         sizeof(uint32_t);
8290         }
8291
8292         return i40e_hw_rss_hash_set(pf, &rss_conf);
8293 }
8294
8295 static int
8296 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8297                                struct rte_eth_tunnel_filter_conf *filter)
8298 {
8299         if (pf == NULL || filter == NULL) {
8300                 PMD_DRV_LOG(ERR, "Invalid parameter");
8301                 return -EINVAL;
8302         }
8303
8304         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8305                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8306                 return -EINVAL;
8307         }
8308
8309         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8310                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8311                 return -EINVAL;
8312         }
8313
8314         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8315                 (is_zero_ether_addr(&filter->outer_mac))) {
8316                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8317                 return -EINVAL;
8318         }
8319
8320         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8321                 (is_zero_ether_addr(&filter->inner_mac))) {
8322                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8323                 return -EINVAL;
8324         }
8325
8326         return 0;
8327 }
8328
8329 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8330 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8331 static int
8332 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8333 {
8334         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8335         uint32_t val, reg;
8336         int ret = -EINVAL;
8337
8338         if (pf->support_multi_driver) {
8339                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8340                 return -ENOTSUP;
8341         }
8342
8343         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8344         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8345
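             /* A length of 3 sets the field-vector mask enable bit in
              * GL_PRS_FVBM(2); a length of 4 clears it. No other GRE key
              * lengths are supported.
              */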
8346         if (len == 3) {
8347                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8348         } else if (len == 4) {
8349                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8350         } else {
8351                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8352                 return ret;
8353         }
8354
8355         if (reg != val) {
8356                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8357                                                    reg, NULL);
8358                 if (ret != 0)
8359                         return ret;
8360                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8361                             "to value 0x%08x",
8362                             I40E_GL_PRS_FVBM(2), reg);
8363                 i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
8364         } else {
8365                 ret = 0;
8366         }
8367         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8368                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8369
8370         return ret;
8371 }
8372
8373 static int
8374 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8375 {
8376         int ret = -EINVAL;
8377
8378         if (!hw || !cfg)
8379                 return -EINVAL;
8380
8381         switch (cfg->cfg_type) {
8382         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8383                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8384                 break;
8385         default:
8386                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8387                 break;
8388         }
8389
8390         return ret;
8391 }
8392
8393 static int
8394 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8395                                enum rte_filter_op filter_op,
8396                                void *arg)
8397 {
8398         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8399         int ret = I40E_ERR_PARAM;
8400
8401         switch (filter_op) {
8402         case RTE_ETH_FILTER_SET:
8403                 ret = i40e_dev_global_config_set(hw,
8404                         (struct rte_eth_global_cfg *)arg);
8405                 break;
8406         default:
8407                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8408                 break;
8409         }
8410
8411         return ret;
8412 }
8413
8414 static int
8415 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8416                           enum rte_filter_op filter_op,
8417                           void *arg)
8418 {
8419         struct rte_eth_tunnel_filter_conf *filter;
8420         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8421         int ret = I40E_SUCCESS;
8422
8423         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8424
8425         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8426                 return I40E_ERR_PARAM;
8427
8428         switch (filter_op) {
8429         case RTE_ETH_FILTER_NOP:
8430                 if (!(pf->flags & I40E_FLAG_VXLAN))
8431                         ret = I40E_NOT_SUPPORTED;
8432                 break;
8433         case RTE_ETH_FILTER_ADD:
8434                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8435                 break;
8436         case RTE_ETH_FILTER_DELETE:
8437                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8438                 break;
8439         default:
8440                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8441                 ret = I40E_ERR_PARAM;
8442                 break;
8443         }
8444
8445         return ret;
8446 }
8447
8448 static int
8449 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8450 {
8451         int ret = 0;
8452         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8453
8454         /* RSS setup */
8455         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8456                 ret = i40e_pf_config_rss(pf);
8457         else
8458                 i40e_pf_disable_rss(pf);
8459
8460         return ret;
8461 }
8462
8463 /* Get the symmetric hash enable configurations per port */
8464 static void
8465 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8466 {
8467         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8468
8469         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8470 }
8471
8472 /* Set the symmetric hash enable configurations per port */
8473 static void
8474 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8475 {
8476         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8477
8478         if (enable > 0) {
8479                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8480                         PMD_DRV_LOG(INFO,
8481                                 "Symmetric hash has already been enabled");
8482                         return;
8483                 }
8484                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8485         } else {
8486                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8487                         PMD_DRV_LOG(INFO,
8488                                 "Symmetric hash has already been disabled");
8489                         return;
8490                 }
8491                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8492         }
8493         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8494         I40E_WRITE_FLUSH(hw);
8495 }
8496
8497 /*
8498  * Get the global configuration of hash function type and symmetric hash
8499  * enable, per flow type (pctype). Note that the global configuration
8500  * affects all ports on the same NIC.
8501  */
8502 static int
8503 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8504                                    struct rte_eth_hash_global_conf *g_cfg)
8505 {
8506         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8507         uint32_t reg;
8508         uint16_t i, j;
8509
8510         memset(g_cfg, 0, sizeof(*g_cfg));
8511         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8512         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8513                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8514         else
8515                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8516         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8517                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8518
8519         /*
8520          * As i40e supports fewer than 64 flow types, only the first 64 bits
8521          * need to be checked.
8522          */
8523         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8524                 g_cfg->valid_bit_mask[i] = 0ULL;
8525                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8526         }
8527
8528         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8529
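             /* A flow type is reported as symmetric-hash enabled if any of
              * the PCTYPEs it maps to has SYMH_ENA set in its GLQF_HSYM
              * register.
              */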
8530         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8531                 if (!adapter->pctypes_tbl[i])
8532                         continue;
8533                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8534                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8535                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8536                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8537                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8538                                         g_cfg->sym_hash_enable_mask[0] |=
8539                                                                 (1ULL << i);
8540                                 }
8541                         }
8542                 }
8543         }
8544
8545         return 0;
8546 }
8547
8548 static int
8549 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8550                               const struct rte_eth_hash_global_conf *g_cfg)
8551 {
8552         uint32_t i;
8553         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8554
8555         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8556                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8557                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8558                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8559                                                 g_cfg->hash_func);
8560                 return -EINVAL;
8561         }
8562
8563         /*
8564          * As i40e supports fewer than 64 flow types, only the first 64 bits
8565          * need to be checked.
8566          */
8567         mask0 = g_cfg->valid_bit_mask[0];
8568         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8569                 if (i == 0) {
8570                         /* Check if any unsupported flow type is configured */
8571                         if ((mask0 | i40e_mask) ^ i40e_mask)
8572                                 goto mask_err;
8573                 } else {
8574                         if (g_cfg->valid_bit_mask[i])
8575                                 goto mask_err;
8576                 }
8577         }
8578
8579         return 0;
8580
8581 mask_err:
8582         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8583
8584         return -EINVAL;
8585 }
8586
8587 /*
8588  * Set the global configuration of hash function type and symmetric hash
8589  * enable, per flow type (pctype). Note that any modification of the global
8590  * configuration will affect all ports on the same NIC.
8591  */
8592 static int
8593 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8594                                    struct rte_eth_hash_global_conf *g_cfg)
8595 {
8596         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8597         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8598         int ret;
8599         uint16_t i, j;
8600         uint32_t reg;
8601         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8602
8603         if (pf->support_multi_driver) {
8604                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8605                 return -ENOTSUP;
8606         }
8607
8608         /* Check the input parameters */
8609         ret = i40e_hash_global_config_check(adapter, g_cfg);
8610         if (ret < 0)
8611                 return ret;
8612
8613         /*
8614          * As i40e supports fewer than 64 flow types, only the first 64 bits
8615          * need to be configured.
8616          */
8617         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8618                 if (mask0 & (1UL << i)) {
8619                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8620                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8621
8622                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8623                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8624                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8625                                         i40e_write_global_rx_ctl(hw,
8626                                                           I40E_GLQF_HSYM(j),
8627                                                           reg);
8628                         }
8629                         i40e_global_cfg_warning(I40E_WARNING_HSYM);
8630                 }
8631         }
8632
8633         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8634         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8635                 /* Toeplitz */
8636                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8637                         PMD_DRV_LOG(DEBUG,
8638                                 "Hash function already set to Toeplitz");
8639                         goto out;
8640                 }
8641                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8642         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8643                 /* Simple XOR */
8644                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8645                         PMD_DRV_LOG(DEBUG,
8646                                 "Hash function already set to Simple XOR");
8647                         goto out;
8648                 }
8649                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8650         } else
8651                 /* Use the default, and keep it as it is */
8652                 goto out;
8653
8654         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8655         i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
8656
8657 out:
8658         I40E_WRITE_FLUSH(hw);
8659
8660         return 0;
8661 }
8662
8663 /**
8664  * Valid input sets for hash and flow director filters per PCTYPE
8665  */
8666 static uint64_t
8667 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8668                 enum rte_filter_type filter)
8669 {
8670         uint64_t valid;
8671
8672         static const uint64_t valid_hash_inset_table[] = {
8673                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8674                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8675                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8676                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8677                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8678                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8679                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8680                         I40E_INSET_FLEX_PAYLOAD,
8681                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8682                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8683                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8684                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8685                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8686                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8687                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8688                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8689                         I40E_INSET_FLEX_PAYLOAD,
8690                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8691                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8692                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8693                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8694                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8695                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8696                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8697                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8698                         I40E_INSET_FLEX_PAYLOAD,
8699                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8700                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8701                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8702                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8703                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8704                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8705                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8706                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8707                         I40E_INSET_FLEX_PAYLOAD,
8708                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8709                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8710                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8711                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8712                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8713                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8714                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8715                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8716                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8717                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8718                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8719                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8720                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8721                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8722                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8723                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8724                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8725                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8726                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8727                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8728                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8729                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8730                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8731                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8732                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8733                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8734                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8735                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8736                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8737                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8738                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8739                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8740                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8741                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8742                         I40E_INSET_FLEX_PAYLOAD,
8743                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8744                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8745                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8746                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8747                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8748                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8749                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8750                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8751                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8752                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8753                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8754                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8755                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8756                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8757                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8758                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8759                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8760                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8761                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8762                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8763                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8764                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8765                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8766                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8767                         I40E_INSET_FLEX_PAYLOAD,
8768                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8769                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8770                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8771                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8772                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8773                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8774                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8775                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8776                         I40E_INSET_FLEX_PAYLOAD,
8777                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8778                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8779                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8780                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8781                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8782                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8783                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8784                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8785                         I40E_INSET_FLEX_PAYLOAD,
8786                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8787                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8788                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8789                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8790                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8791                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8792                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8793                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8794                         I40E_INSET_FLEX_PAYLOAD,
8795                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8796                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8797                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8798                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8799                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8800                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8801                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8802                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8803                         I40E_INSET_FLEX_PAYLOAD,
8804                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8805                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8806                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8807                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8808                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8809                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8810                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8811                         I40E_INSET_FLEX_PAYLOAD,
8812                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8813                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8814                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8815                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8816                         I40E_INSET_FLEX_PAYLOAD,
8817         };
8818
8819         /**
8820          * Flow director supports only fields defined in
8821          * union rte_eth_fdir_flow.
8822          */
8823         static const uint64_t valid_fdir_inset_table[] = {
8824                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8825                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8826                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8827                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8828                 I40E_INSET_IPV4_TTL,
8829                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8830                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8831                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8832                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8833                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8834                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8835                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8836                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8837                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8838                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8839                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8840                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8841                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8842                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8843                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8844                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8845                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8846                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8847                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8848                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8849                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8850                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8851                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8852                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8853                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8854                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8855                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8856                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8857                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8858                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8859                 I40E_INSET_SCTP_VT,
8860                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8861                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8862                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8863                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8864                 I40E_INSET_IPV4_TTL,
8865                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8866                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8867                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8868                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8869                 I40E_INSET_IPV6_HOP_LIMIT,
8870                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8871                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8872                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8873                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8874                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8875                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8876                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8877                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8878                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8879                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8880                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8881                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8882                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8883                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8884                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8885                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8886                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8887                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8888                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8889                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8890                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8891                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8892                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8893                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8894                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8895                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8896                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8897                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8898                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8899                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8900                 I40E_INSET_SCTP_VT,
8901                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8902                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8903                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8904                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8905                 I40E_INSET_IPV6_HOP_LIMIT,
8906                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8907                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8908                 I40E_INSET_LAST_ETHER_TYPE,
8909         };
8910
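             /* PCTYPEs beyond L2_PAYLOAD are outside both tables; report an
              * empty valid input set for them.
              */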
8911         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8912                 return 0;
8913         if (filter == RTE_ETH_FILTER_HASH)
8914                 valid = valid_hash_inset_table[pctype];
8915         else
8916                 valid = valid_fdir_inset_table[pctype];
8917
8918         return valid;
8919 }
8920
8921 /**
8922  * Validate if the input set is allowed for a specific PCTYPE
8923  */
8924 int
8925 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8926                 enum rte_filter_type filter, uint64_t inset)
8927 {
8928         uint64_t valid;
8929
8930         valid = i40e_get_valid_input_set(pctype, filter);
8931         if (inset & (~valid))
8932                 return -EINVAL;
8933
8934         return 0;
8935 }
8936
8937 /* default input set fields combination per pctype */
8938 uint64_t
8939 i40e_get_default_input_set(uint16_t pctype)
8940 {
8941         static const uint64_t default_inset_table[] = {
8942                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8943                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8944                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8945                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8946                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8947                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8948                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8949                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8950                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8951                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8952                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8953                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8954                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8955                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8956                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8957                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8958                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8959                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8960                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8961                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8962                         I40E_INSET_SCTP_VT,
8963                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8964                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8965                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8966                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8967                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8968                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8969                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8970                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8971                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8972                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8973                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8974                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8975                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8976                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8977                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8978                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8979                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8980                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8981                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8982                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8983                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8984                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8985                         I40E_INSET_SCTP_VT,
8986                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8987                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8988                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8989                         I40E_INSET_LAST_ETHER_TYPE,
8990         };
8991
8992         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8993                 return 0;
8994
8995         return default_inset_table[pctype];
8996 }
8997
8998 /**
8999  * Parse the input set from field indexes to logical bit masks
9000  */
9001 static int
9002 i40e_parse_input_set(uint64_t *inset,
9003                      enum i40e_filter_pctype pctype,
9004                      enum rte_eth_input_set_field *field,
9005                      uint16_t size)
9006 {
9007         uint16_t i, j;
9008         int ret = -EINVAL;
9009
9010         static const struct {
9011                 enum rte_eth_input_set_field field;
9012                 uint64_t inset;
9013         } inset_convert_table[] = {
9014                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9015                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9016                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9017                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9018                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9019                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9020                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9021                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9022                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9023                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9024                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9025                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9026                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9027                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9028                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9029                         I40E_INSET_IPV6_NEXT_HDR},
9030                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9031                         I40E_INSET_IPV6_HOP_LIMIT},
9032                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9033                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9034                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9035                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9036                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9037                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9038                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9039                         I40E_INSET_SCTP_VT},
9040                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9041                         I40E_INSET_TUNNEL_DMAC},
9042                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9043                         I40E_INSET_VLAN_TUNNEL},
9044                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9045                         I40E_INSET_TUNNEL_ID},
9046                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9047                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9048                         I40E_INSET_FLEX_PAYLOAD_W1},
9049                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9050                         I40E_INSET_FLEX_PAYLOAD_W2},
9051                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9052                         I40E_INSET_FLEX_PAYLOAD_W3},
9053                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9054                         I40E_INSET_FLEX_PAYLOAD_W4},
9055                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9056                         I40E_INSET_FLEX_PAYLOAD_W5},
9057                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9058                         I40E_INSET_FLEX_PAYLOAD_W6},
9059                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9060                         I40E_INSET_FLEX_PAYLOAD_W7},
9061                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9062                         I40E_INSET_FLEX_PAYLOAD_W8},
9063         };
9064
9065         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9066                 return ret;
9067
9068         /* Only one item allowed for default or all */
9069         if (size == 1) {
9070                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9071                         *inset = i40e_get_default_input_set(pctype);
9072                         return 0;
9073                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9074                         *inset = I40E_INSET_NONE;
9075                         return 0;
9076                 }
9077         }
9078
9079         for (i = 0, *inset = 0; i < size; i++) {
9080                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9081                         if (field[i] == inset_convert_table[j].field) {
9082                                 *inset |= inset_convert_table[j].inset;
9083                                 break;
9084                         }
9085                 }
9086
9087                 /* Found an unsupported input set field, return immediately */
9088                 if (j == RTE_DIM(inset_convert_table))
9089                         return ret;
9090         }
9091
9092         return 0;
9093 }
9094
9095 /**
9096  * Translate the input set from logical bit masks into
9097  * register-aware bit masks
9098  */
9099 uint64_t
9100 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9101 {
9102         uint64_t val = 0;
9103         uint16_t i;
9104
9105         struct inset_map {
9106                 uint64_t inset;
9107                 uint64_t inset_reg;
9108         };
9109
9110         static const struct inset_map inset_map_common[] = {
9111                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9112                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9113                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9114                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9115                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9116                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9117                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9118                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9119                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9120                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9121                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9122                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9123                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9124                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9125                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9126                 {I40E_INSET_TUNNEL_DMAC,
9127                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9128                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9129                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9130                 {I40E_INSET_TUNNEL_SRC_PORT,
9131                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9132                 {I40E_INSET_TUNNEL_DST_PORT,
9133                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9134                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9135                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9136                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9137                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9138                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9139                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9140                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9141                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9142                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9143         };
9144
9145         /* Some registers are mapped differently on X722 */
9146         static const struct inset_map inset_map_diff_x722[] = {
9147                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9148                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9149                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9150                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9151         };
9152
9153         static const struct inset_map inset_map_diff_not_x722[] = {
9154                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9155                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9156                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9157                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9158         };
9159
9160         if (input == 0)
9161                 return val;
9162
9163         /* Translate input set to register aware inset */
9164         if (type == I40E_MAC_X722) {
9165                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9166                         if (input & inset_map_diff_x722[i].inset)
9167                                 val |= inset_map_diff_x722[i].inset_reg;
9168                 }
9169         } else {
9170                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9171                         if (input & inset_map_diff_not_x722[i].inset)
9172                                 val |= inset_map_diff_not_x722[i].inset_reg;
9173                 }
9174         }
9175
9176         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9177                 if (input & inset_map_common[i].inset)
9178                         val |= inset_map_common[i].inset_reg;
9179         }
9180
9181         return val;
9182 }
9183
9184 int
9185 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9186 {
9187         uint8_t i, idx = 0;
9188         uint64_t inset_need_mask = inset;
9189
9190         static const struct {
9191                 uint64_t inset;
9192                 uint32_t mask;
9193         } inset_mask_map[] = {
9194                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9195                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9196                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9197                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9198                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9199                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9200                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9201                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9202         };
9203
9204         if (!inset || !mask || !nb_elem)
9205                 return 0;
9206
9207         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9208                 /* Clear the inset bit if no mask is required for it,
9209                  * for example proto + ttl together need no mask
9210                  */
9211                 if ((inset & inset_mask_map[i].inset) ==
9212                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9213                         inset_need_mask &= ~inset_mask_map[i].inset;
9214                 if (!inset_need_mask)
9215                         return 0;
9216         }
9217         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9218                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9219                     inset_mask_map[i].inset) {
9220                         if (idx >= nb_elem) {
9221                                 PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
9222                                 return -EINVAL;
9223                         }
9224                         mask[idx] = inset_mask_map[i].mask;
9225                         idx++;
9226                 }
9227         }
9228
9229         return idx;
9230 }
9231
9232 void
9233 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9234 {
9235         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9236
9237         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9238         if (reg != val)
9239                 i40e_write_rx_ctl(hw, addr, val);
9240         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9241                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9242 }
9243
9244 void
9245 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9246 {
9247         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9248
9249         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9250         if (reg != val)
9251                 i40e_write_global_rx_ctl(hw, addr, val);
9252         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9253                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9254 }
9255
9256 static void
9257 i40e_filter_input_set_init(struct i40e_pf *pf)
9258 {
9259         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9260         enum i40e_filter_pctype pctype;
9261         uint64_t input_set, inset_reg;
9262         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9263         int num, i;
9264         uint16_t flow_type;
9265
9266         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9267              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9268                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9269
9270                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9271                         continue;
9272
9273                 input_set = i40e_get_default_input_set(pctype);
9274
9275                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9276                                                    I40E_INSET_MASK_NUM_REG);
9277                 if (num < 0)
9278                         return;
9279                 if (pf->support_multi_driver && num > 0) {
9280                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9281                         return;
9282                 }
9283                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9284                                         input_set);
9285
9286                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9287                                       (uint32_t)(inset_reg & UINT32_MAX));
9288                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9289                                      (uint32_t)((inset_reg >>
9290                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9291                 if (!pf->support_multi_driver) {
9292                         i40e_check_write_global_reg(hw,
9293                                             I40E_GLQF_HASH_INSET(0, pctype),
9294                                             (uint32_t)(inset_reg & UINT32_MAX));
9295                         i40e_check_write_global_reg(hw,
9296                                              I40E_GLQF_HASH_INSET(1, pctype),
9297                                              (uint32_t)((inset_reg >>
9298                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9299
9300                         for (i = 0; i < num; i++) {
9301                                 i40e_check_write_global_reg(hw,
9302                                                     I40E_GLQF_FD_MSK(i, pctype),
9303                                                     mask_reg[i]);
9304                                 i40e_check_write_global_reg(hw,
9305                                                   I40E_GLQF_HASH_MSK(i, pctype),
9306                                                   mask_reg[i]);
9307                         }
9308                         /* Clear unused mask registers of the pctype */
9309                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9310                                 i40e_check_write_global_reg(hw,
9311                                                     I40E_GLQF_FD_MSK(i, pctype),
9312                                                     0);
9313                                 i40e_check_write_global_reg(hw,
9314                                                   I40E_GLQF_HASH_MSK(i, pctype),
9315                                                   0);
9316                         }
9317                 } else {
9318                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9319                 }
9320                 I40E_WRITE_FLUSH(hw);
9321
9322                 /* store the default input set */
9323                 if (!pf->support_multi_driver)
9324                         pf->hash_input_set[pctype] = input_set;
9325                 pf->fdir.input_set[pctype] = input_set;
9326         }
9327
9328         if (!pf->support_multi_driver) {
9329                 i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9330                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9331                 i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9332         }
9333 }
9334
9335 int
9336 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9337                          struct rte_eth_input_set_conf *conf)
9338 {
9339         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9340         enum i40e_filter_pctype pctype;
9341         uint64_t input_set, inset_reg = 0;
9342         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9343         int ret, i, num;
9344
9345         if (!conf) {
9346                 PMD_DRV_LOG(ERR, "Invalid pointer");
9347                 return -EFAULT;
9348         }
9349         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9350             conf->op != RTE_ETH_INPUT_SET_ADD) {
9351                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9352                 return -EINVAL;
9353         }
9354
9355         if (pf->support_multi_driver) {
9356                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9357                 return -ENOTSUP;
9358         }
9359
9360         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9361         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9362                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9363                 return -EINVAL;
9364         }
9365
9366         if (hw->mac.type == I40E_MAC_X722) {
9367                 /* get translated pctype value in fd pctype register */
9368                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9369                         I40E_GLQF_FD_PCTYPES((int)pctype));
9370         }
9371
9372         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9373                                    conf->inset_size);
9374         if (ret) {
9375                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9376                 return -EINVAL;
9377         }
9378
9379         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9380                 /* get inset value in register */
9381                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9382                 inset_reg <<= I40E_32_BIT_WIDTH;
9383                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9384                 input_set |= pf->hash_input_set[pctype];
9385         }
9386         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9387                                            I40E_INSET_MASK_NUM_REG);
9388         if (num < 0)
9389                 return -EINVAL;
9390
9391         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9392
9393         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9394                                     (uint32_t)(inset_reg & UINT32_MAX));
9395         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9396                                     (uint32_t)((inset_reg >>
9397                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9398         i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9399
9400         for (i = 0; i < num; i++)
9401                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9402                                             mask_reg[i]);
9403         /* Clear unused mask registers of the pctype */
9404         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9405                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9406                                             0);
9407         i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9408         I40E_WRITE_FLUSH(hw);
9409
9410         pf->hash_input_set[pctype] = input_set;
9411         return 0;
9412 }
9413
9414 int
9415 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9416                          struct rte_eth_input_set_conf *conf)
9417 {
9418         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9419         enum i40e_filter_pctype pctype;
9420         uint64_t input_set, inset_reg = 0;
9421         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9422         int ret, i, num;
9423
9424         if (!hw || !conf) {
9425                 PMD_DRV_LOG(ERR, "Invalid pointer");
9426                 return -EFAULT;
9427         }
9428         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9429             conf->op != RTE_ETH_INPUT_SET_ADD) {
9430                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9431                 return -EINVAL;
9432         }
9433
9434         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9435
9436         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9437                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9438                 return -EINVAL;
9439         }
9440
9441         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9442                                    conf->inset_size);
9443         if (ret) {
9444                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9445                 return -EINVAL;
9446         }
9447
9448         /* get inset value in register */
9449         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9450         inset_reg <<= I40E_32_BIT_WIDTH;
9451         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9452
9453         /* Cannot change the inset reg for flex payload for fdir;
9454          * that is done by writing I40E_PRTQF_FD_FLXINSET
9455          * in i40e_set_flex_mask_on_pctype.
9456          */
9457         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9458                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9459         else
9460                 input_set |= pf->fdir.input_set[pctype];
9461         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9462                                            I40E_INSET_MASK_NUM_REG);
9463         if (num < 0)
9464                 return -EINVAL;
9465         if (pf->support_multi_driver && num > 0) {
9466                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9467                 return -ENOTSUP;
9468         }
9469
9470         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9471
9472         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9473                               (uint32_t)(inset_reg & UINT32_MAX));
9474         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9475                              (uint32_t)((inset_reg >>
9476                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9477
9478         if (!pf->support_multi_driver) {
9479                 for (i = 0; i < num; i++)
9480                         i40e_check_write_global_reg(hw,
9481                                                     I40E_GLQF_FD_MSK(i, pctype),
9482                                                     mask_reg[i]);
9483                 /* Clear unused mask registers of the pctype */
9484                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9485                         i40e_check_write_global_reg(hw,
9486                                                     I40E_GLQF_FD_MSK(i, pctype),
9487                                                     0);
9488                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9489         } else {
9490                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9491         }
9492         I40E_WRITE_FLUSH(hw);
9493
9494         pf->fdir.input_set[pctype] = input_set;
9495         return 0;
9496 }
9497
9498 static int
9499 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9500 {
9501         int ret = 0;
9502
9503         if (!hw || !info) {
9504                 PMD_DRV_LOG(ERR, "Invalid pointer");
9505                 return -EFAULT;
9506         }
9507
9508         switch (info->info_type) {
9509         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9510                 i40e_get_symmetric_hash_enable_per_port(hw,
9511                                         &(info->info.enable));
9512                 break;
9513         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9514                 ret = i40e_get_hash_filter_global_config(hw,
9515                                 &(info->info.global_conf));
9516                 break;
9517         default:
9518                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9519                                                         info->info_type);
9520                 ret = -EINVAL;
9521                 break;
9522         }
9523
9524         return ret;
9525 }
9526
9527 static int
9528 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9529 {
9530         int ret = 0;
9531
9532         if (!hw || !info) {
9533                 PMD_DRV_LOG(ERR, "Invalid pointer");
9534                 return -EFAULT;
9535         }
9536
9537         switch (info->info_type) {
9538         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9539                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9540                 break;
9541         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9542                 ret = i40e_set_hash_filter_global_config(hw,
9543                                 &(info->info.global_conf));
9544                 break;
9545         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9546                 ret = i40e_hash_filter_inset_select(hw,
9547                                                &(info->info.input_set_conf));
9548                 break;
9549
9550         default:
9551                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9552                                                         info->info_type);
9553                 ret = -EINVAL;
9554                 break;
9555         }
9556
9557         return ret;
9558 }
9559
9560 /* Operations for hash function */
9561 static int
9562 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9563                       enum rte_filter_op filter_op,
9564                       void *arg)
9565 {
9566         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9567         int ret = 0;
9568
9569         switch (filter_op) {
9570         case RTE_ETH_FILTER_NOP:
9571                 break;
9572         case RTE_ETH_FILTER_GET:
9573                 ret = i40e_hash_filter_get(hw,
9574                         (struct rte_eth_hash_filter_info *)arg);
9575                 break;
9576         case RTE_ETH_FILTER_SET:
9577                 ret = i40e_hash_filter_set(hw,
9578                         (struct rte_eth_hash_filter_info *)arg);
9579                 break;
9580         default:
9581                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9582                                                                 filter_op);
9583                 ret = -ENOTSUP;
9584                 break;
9585         }
9586
9587         return ret;
9588 }
9589
9590 /* Convert ethertype filter structure */
9591 static int
9592 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9593                               struct i40e_ethertype_filter *filter)
9594 {
9595         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9596         filter->input.ether_type = input->ether_type;
9597         filter->flags = input->flags;
9598         filter->queue = input->queue;
9599
9600         return 0;
9601 }
9602
9603 /* Check if the ethertype filter already exists */
9604 struct i40e_ethertype_filter *
9605 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9606                                 const struct i40e_ethertype_filter_input *input)
9607 {
9608         int ret;
9609
9610         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9611         if (ret < 0)
9612                 return NULL;
9613
9614         return ethertype_rule->hash_map[ret];
9615 }
9616
9617 /* Add ethertype filter in SW list */
9618 static int
9619 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9620                                 struct i40e_ethertype_filter *filter)
9621 {
9622         struct i40e_ethertype_rule *rule = &pf->ethertype;
9623         int ret;
9624
9625         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9626         if (ret < 0) {
9627                 PMD_DRV_LOG(ERR,
9628                             "Failed to insert ethertype filter"
9629                             " into hash table %d!",
9630                             ret);
9631                 return ret;
9632         }
9633         rule->hash_map[ret] = filter;
9634
9635         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9636
9637         return 0;
9638 }
9639
9640 /* Delete ethertype filter in SW list */
9641 int
9642 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9643                              struct i40e_ethertype_filter_input *input)
9644 {
9645         struct i40e_ethertype_rule *rule = &pf->ethertype;
9646         struct i40e_ethertype_filter *filter;
9647         int ret;
9648
9649         ret = rte_hash_del_key(rule->hash_table, input);
9650         if (ret < 0) {
9651                 PMD_DRV_LOG(ERR,
9652                             "Failed to delete ethertype filter"
9653                             " from hash table %d!",
9654                             ret);
9655                 return ret;
9656         }
9657         filter = rule->hash_map[ret];
9658         rule->hash_map[ret] = NULL;
9659
9660         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9661         rte_free(filter);
9662
9663         return 0;
9664 }
9665
9666 /*
9667  * Configure an ethertype filter, which can direct packets by filtering
9668  * on MAC address plus ether_type, or on ether_type alone
9669  */
9670 int
9671 i40e_ethertype_filter_set(struct i40e_pf *pf,
9672                         struct rte_eth_ethertype_filter *filter,
9673                         bool add)
9674 {
9675         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9676         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9677         struct i40e_ethertype_filter *ethertype_filter, *node;
9678         struct i40e_ethertype_filter check_filter;
9679         struct i40e_control_filter_stats stats;
9680         uint16_t flags = 0;
9681         int ret;
9682
9683         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9684                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9685                 return -EINVAL;
9686         }
9687         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9688                 filter->ether_type == ETHER_TYPE_IPv6) {
9689                 PMD_DRV_LOG(ERR,
9690                         "unsupported ether_type(0x%04x) in control packet filter.",
9691                         filter->ether_type);
9692                 return -EINVAL;
9693         }
9694         if (filter->ether_type == ETHER_TYPE_VLAN)
9695                 PMD_DRV_LOG(WARNING,
9696                         "filter vlan ether_type in first tag is not supported.");
9697
9698         /* Check if the filter already exists in the SW list */
9699         memset(&check_filter, 0, sizeof(check_filter));
9700         i40e_ethertype_filter_convert(filter, &check_filter);
9701         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9702                                                &check_filter.input);
9703         if (add && node) {
9704                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9705                 return -EINVAL;
9706         }
9707
9708         if (!add && !node) {
9709                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9710                 return -EINVAL;
9711         }
9712
9713         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9714                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9715         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9716                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9717         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9718
9719         memset(&stats, 0, sizeof(stats));
9720         ret = i40e_aq_add_rem_control_packet_filter(hw,
9721                         filter->mac_addr.addr_bytes,
9722                         filter->ether_type, flags,
9723                         pf->main_vsi->seid,
9724                         filter->queue, add, &stats, NULL);
9725
9726         PMD_DRV_LOG(INFO,
9727                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9728                 ret, stats.mac_etype_used, stats.etype_used,
9729                 stats.mac_etype_free, stats.etype_free);
9730         if (ret < 0)
9731                 return -ENOSYS;
9732
9733         /* Add or delete a filter in SW list */
9734         if (add) {
9735                 ethertype_filter = rte_zmalloc("ethertype_filter",
9736                                        sizeof(*ethertype_filter), 0);
9737                 if (ethertype_filter == NULL) {
9738                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9739                         return -ENOMEM;
9740                 }
9741
9742                 rte_memcpy(ethertype_filter, &check_filter,
9743                            sizeof(check_filter));
9744                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9745                 if (ret < 0)
9746                         rte_free(ethertype_filter);
9747         } else {
9748                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9749         }
9750
9751         return ret;
9752 }
9753
9754 /*
9755  * Handle operations for ethertype filter.
9756  */
9757 static int
9758 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9759                                 enum rte_filter_op filter_op,
9760                                 void *arg)
9761 {
9762         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9763         int ret = 0;
9764
9765         if (filter_op == RTE_ETH_FILTER_NOP)
9766                 return ret;
9767
9768         if (arg == NULL) {
9769                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9770                             filter_op);
9771                 return -EINVAL;
9772         }
9773
9774         switch (filter_op) {
9775         case RTE_ETH_FILTER_ADD:
9776                 ret = i40e_ethertype_filter_set(pf,
9777                         (struct rte_eth_ethertype_filter *)arg,
9778                         TRUE);
9779                 break;
9780         case RTE_ETH_FILTER_DELETE:
9781                 ret = i40e_ethertype_filter_set(pf,
9782                         (struct rte_eth_ethertype_filter *)arg,
9783                         FALSE);
9784                 break;
9785         default:
9786                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9787                 ret = -ENOSYS;
9788                 break;
9789         }
9790         return ret;
9791 }
9792
9793 static int
9794 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9795                      enum rte_filter_type filter_type,
9796                      enum rte_filter_op filter_op,
9797                      void *arg)
9798 {
9799         int ret = 0;
9800
9801         if (dev == NULL)
9802                 return -EINVAL;
9803
9804         switch (filter_type) {
9805         case RTE_ETH_FILTER_NONE:
9806                 /* For global configuration */
9807                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9808                 break;
9809         case RTE_ETH_FILTER_HASH:
9810                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9811                 break;
9812         case RTE_ETH_FILTER_MACVLAN:
9813                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9814                 break;
9815         case RTE_ETH_FILTER_ETHERTYPE:
9816                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9817                 break;
9818         case RTE_ETH_FILTER_TUNNEL:
9819                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9820                 break;
9821         case RTE_ETH_FILTER_FDIR:
9822                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9823                 break;
9824         case RTE_ETH_FILTER_GENERIC:
9825                 if (filter_op != RTE_ETH_FILTER_GET)
9826                         return -EINVAL;
9827                 *(const void **)arg = &i40e_flow_ops;
9828                 break;
9829         default:
9830                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9831                                                         filter_type);
9832                 ret = -EINVAL;
9833                 break;
9834         }
9835
9836         return ret;
9837 }
9838
9839 /*
9840  * Check and enable Extended Tag.
9841  * Enabling Extended Tag is important for 40G performance.
9842  */
9843 static void
9844 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9845 {
9846         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9847         uint32_t buf = 0;
9848         int ret;
9849
9850         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9851                                       PCI_DEV_CAP_REG);
9852         if (ret < 0) {
9853                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9854                             PCI_DEV_CAP_REG);
9855                 return;
9856         }
9857         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9858                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9859                 return;
9860         }
9861
9862         buf = 0;
9863         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9864                                       PCI_DEV_CTRL_REG);
9865         if (ret < 0) {
9866                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9867                             PCI_DEV_CTRL_REG);
9868                 return;
9869         }
9870         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9871                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9872                 return;
9873         }
9874         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9875         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9876                                        PCI_DEV_CTRL_REG);
9877         if (ret < 0) {
9878                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9879                             PCI_DEV_CTRL_REG);
9880                 return;
9881         }
9882 }
9883
9884 /*
9885  * As some registers are not reset unless a global hardware reset occurs,
9886  * hardware initialization is needed to put those registers into an
9887  * expected initial state.
9888  */
9889 static void
9890 i40e_hw_init(struct rte_eth_dev *dev)
9891 {
9892         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9893
9894         i40e_enable_extended_tag(dev);
9895
9896         /* clear the PF Queue Filter control register */
9897         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9898
9899         /* Disable symmetric hash per port */
9900         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9901 }
9902
9903 /*
9904  * On X722 it is possible to have multiple pctypes mapped to the same
9905  * flowtype; however, this function returns only the single highest pctype
9906  * index, which is not quite correct. This is a known problem of the i40e
9907  * driver and needs to be fixed later.
9908  */
9909 enum i40e_filter_pctype
9910 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9911 {
9912         int i;
9913         uint64_t pctype_mask;
9914
9915         if (flow_type < I40E_FLOW_TYPE_MAX) {
9916                 pctype_mask = adapter->pctypes_tbl[flow_type];
9917                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9918                         if (pctype_mask & (1ULL << i))
9919                                 return (enum i40e_filter_pctype)i;
9920                 }
9921         }
9922         return I40E_FILTER_PCTYPE_INVALID;
9923 }
9924
9925 uint16_t
9926 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9927                         enum i40e_filter_pctype pctype)
9928 {
9929         uint16_t flowtype;
9930         uint64_t pctype_mask = 1ULL << pctype;
9931
9932         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9933              flowtype++) {
9934                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9935                         return flowtype;
9936         }
9937
9938         return RTE_ETH_FLOW_UNKNOWN;
9939 }
9940
9941 /*
9942  * On X710, performance numbers are far below expectations on recent
9943  * firmware versions; on XL710 the same applies if promiscuous mode is
9944  * disabled, or if promiscuous mode is enabled and the port MAC address
9945  * equals the packet destination MAC address. The fix for this issue may
9946  * not be integrated into upcoming firmware versions, so a workaround in
9947  * the software driver is needed. It modifies the initial values of 3
9948  * internal-only registers for both X710 and XL710. Note that the values
9949  * for X710 and XL710 could differ, and the workaround can be removed
9950  * once this is fixed in firmware.
9951  */
9952
9953 /* For both X710 and XL710 */
9954 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9955 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9956 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9957
9958 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9959 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9960
9961 /* For X722 */
9962 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9963 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9964
9965 /* For X710 */
9966 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9967 /* For XL710 */
9968 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9969 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9970
9971 static int
9972 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9973 {
9974         enum i40e_status_code status;
9975         struct i40e_aq_get_phy_abilities_resp phy_ab;
9976         int ret = -ENOTSUP;
9977         int retries = 0;
9978
9979         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9980                                               NULL);
9981
9982         while (status) {
9983                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9984                         status);
9985                 retries++;
9986                 rte_delay_us(100000);
9987                 if (retries < 5)
9988                         status = i40e_aq_get_phy_capabilities(hw, false,
9989                                         true, &phy_ab, NULL);
9990                 else
9991                         return ret;
9992         }
9993         return 0;
9994 }
9995
9996 static void
9997 i40e_configure_registers(struct i40e_hw *hw)
9998 {
9999         static struct {
10000                 uint32_t addr;
10001                 uint64_t val;
10002         } reg_table[] = {
10003                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10004                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10005                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10006         };
10007         uint64_t reg;
10008         uint32_t i;
10009         int ret;
10010
10011         for (i = 0; i < RTE_DIM(reg_table); i++) {
10012                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10013                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10014                                 reg_table[i].val =
10015                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10016                         else /* For X710/XL710/XXV710 */
10017                                 if (hw->aq.fw_maj_ver < 6)
10018                                         reg_table[i].val =
10019                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10020                                 else
10021                                         reg_table[i].val =
10022                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10023                 }
10024
10025                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10026                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10027                                 reg_table[i].val =
10028                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10029                         else /* For X710/XL710/XXV710 */
10030                                 reg_table[i].val =
10031                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10032                 }
10033
10034                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10035                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
10036                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
10037                                 reg_table[i].val =
10038                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
10039                         else /* For X710 */
10040                                 reg_table[i].val =
10041                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
10042                 }
10043
10044                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10045                                                         &reg, NULL);
10046                 if (ret < 0) {
10047                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10048                                                         reg_table[i].addr);
10049                         break;
10050                 }
10051                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10052                                                 reg_table[i].addr, reg);
10053                 if (reg == reg_table[i].val)
10054                         continue;
10055
10056                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10057                                                 reg_table[i].val, NULL);
10058                 if (ret < 0) {
10059                         PMD_DRV_LOG(ERR,
10060                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10061                                 reg_table[i].val, reg_table[i].addr);
10062                         break;
10063                 }
10064                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10065                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10066         }
10067 }
10068
10069 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
10070 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10071 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10072 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10073 static int
10074 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10075 {
10076         uint32_t reg;
10077         int ret;
10078
10079         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10080                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10081                 return -EINVAL;
10082         }
10083
10084         /* Configure for double VLAN RX stripping */
10085         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10086         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10087                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10088                 ret = i40e_aq_debug_write_register(hw,
10089                                                    I40E_VSI_TSR(vsi->vsi_id),
10090                                                    reg, NULL);
10091                 if (ret < 0) {
10092                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10093                                     vsi->vsi_id);
10094                         return I40E_ERR_CONFIG;
10095                 }
10096         }
10097
10098         /* Configure for double VLAN TX insertion */
10099         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10100         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10101                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10102                 ret = i40e_aq_debug_write_register(hw,
10103                                                    I40E_VSI_L2TAGSTXVALID(
10104                                                    vsi->vsi_id), reg, NULL);
10105                 if (ret < 0) {
10106                         PMD_DRV_LOG(ERR,
10107                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10108                                 vsi->vsi_id);
10109                         return I40E_ERR_CONFIG;
10110                 }
10111         }
10112
10113         return 0;
10114 }
10115
10116 /**
10117  * i40e_aq_add_mirror_rule
10118  * @hw: pointer to the hardware structure
10119  * @seid: VEB seid to add mirror rule to
10120  * @dst_id: destination vsi seid
10121  * @entries: Buffer which contains the entities to be mirrored
10122  * @count: number of entities contained in the buffer
10123  * @rule_id: the rule_id of the rule to be added
10124  *
10125  * Add a mirror rule for a given veb.
10126  *
10127  **/
10128 static enum i40e_status_code
10129 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10130                         uint16_t seid, uint16_t dst_id,
10131                         uint16_t rule_type, uint16_t *entries,
10132                         uint16_t count, uint16_t *rule_id)
10133 {
10134         struct i40e_aq_desc desc;
10135         struct i40e_aqc_add_delete_mirror_rule cmd;
10136         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10137                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10138                 &desc.params.raw;
10139         uint16_t buff_len;
10140         enum i40e_status_code status;
10141
10142         i40e_fill_default_direct_cmd_desc(&desc,
10143                                           i40e_aqc_opc_add_mirror_rule);
10144         memset(&cmd, 0, sizeof(cmd));
10145
10146         buff_len = sizeof(uint16_t) * count;
10147         desc.datalen = rte_cpu_to_le_16(buff_len);
10148         if (buff_len > 0)
10149                 desc.flags |= rte_cpu_to_le_16(
10150                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10151         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10152                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10153         cmd.num_entries = rte_cpu_to_le_16(count);
10154         cmd.seid = rte_cpu_to_le_16(seid);
10155         cmd.destination = rte_cpu_to_le_16(dst_id);
10156
10157         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10158         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10159         PMD_DRV_LOG(INFO,
10160                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
10161                 hw->aq.asq_last_status, resp->rule_id,
10162                 resp->mirror_rules_used, resp->mirror_rules_free);
10163         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10164
10165         return status;
10166 }
10167
10168 /**
10169  * i40e_aq_del_mirror_rule
10170  * @hw: pointer to the hardware structure
10171  * @seid: VEB seid to delete the mirror rule from
10172  * @entries: Buffer which contains the entities to be mirrored
10173  * @count: number of entities contained in the buffer
10174  * @rule_id: the rule_id of the rule to be deleted
10175  *
10176  * Delete a mirror rule for a given veb.
10177  *
10178  **/
10179 static enum i40e_status_code
10180 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10181                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10182                 uint16_t count, uint16_t rule_id)
10183 {
10184         struct i40e_aq_desc desc;
10185         struct i40e_aqc_add_delete_mirror_rule cmd;
10186         uint16_t buff_len = 0;
10187         enum i40e_status_code status;
10188         void *buff = NULL;
10189
10190         i40e_fill_default_direct_cmd_desc(&desc,
10191                                           i40e_aqc_opc_delete_mirror_rule);
10192         memset(&cmd, 0, sizeof(cmd));
10193         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10194                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10195                                                           I40E_AQ_FLAG_RD));
10196                 cmd.num_entries = count;
10197                 buff_len = sizeof(uint16_t) * count;
10198                 desc.datalen = rte_cpu_to_le_16(buff_len);
10199                 buff = (void *)entries;
10200         } else
10201                 /* rule_id is filled in the destination field when deleting a mirror rule */
10202                 cmd.destination = rte_cpu_to_le_16(rule_id);
10203
10204         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10205                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10206         cmd.seid = rte_cpu_to_le_16(seid);
10207
10208         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10209         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10210
10211         return status;
10212 }
10213
10214 /**
10215  * i40e_mirror_rule_set
10216  * @dev: pointer to the device structure
10217  * @mirror_conf: mirror rule info
10218  * @sw_id: mirror rule's sw_id
10219  * @on: enable/disable
10220  *
10221  * Set a mirror rule.
10222  *
10223  **/
10224 static int
10225 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10226                         struct rte_eth_mirror_conf *mirror_conf,
10227                         uint8_t sw_id, uint8_t on)
10228 {
10229         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10230         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10231         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10232         struct i40e_mirror_rule *parent = NULL;
10233         uint16_t seid, dst_seid, rule_id;
10234         uint16_t i, j = 0;
10235         int ret;
10236
10237         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10238
10239         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10240                 PMD_DRV_LOG(ERR,
10241                         "mirror rule cannot be configured without veb or vfs.");
10242                 return -ENOSYS;
10243         }
10244         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10245                 PMD_DRV_LOG(ERR, "mirror table is full.");
10246                 return -ENOSPC;
10247         }
10248         if (mirror_conf->dst_pool > pf->vf_num) {
10249                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10250                                  mirror_conf->dst_pool);
10251                 return -EINVAL;
10252         }
10253
10254         seid = pf->main_vsi->veb->seid;
10255
10256         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10257                 if (sw_id <= it->index) {
10258                         mirr_rule = it;
10259                         break;
10260                 }
10261                 parent = it;
10262         }
10263         if (mirr_rule && sw_id == mirr_rule->index) {
10264                 if (on) {
10265                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10266                         return -EEXIST;
10267                 } else {
10268                         ret = i40e_aq_del_mirror_rule(hw, seid,
10269                                         mirr_rule->rule_type,
10270                                         mirr_rule->entries,
10271                                         mirr_rule->num_entries, mirr_rule->id);
10272                         if (ret < 0) {
10273                                 PMD_DRV_LOG(ERR,
10274                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10275                                         ret, hw->aq.asq_last_status);
10276                                 return -ENOSYS;
10277                         }
10278                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10279                         rte_free(mirr_rule);
10280                         pf->nb_mirror_rule--;
10281                         return 0;
10282                 }
10283         } else if (!on) {
10284                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10285                 return -ENOENT;
10286         }
10287
10288         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10289                                 sizeof(struct i40e_mirror_rule), 0);
10290         if (!mirr_rule) {
10291                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10292                 return I40E_ERR_NO_MEMORY;
10293         }
10294         switch (mirror_conf->rule_type) {
10295         case ETH_MIRROR_VLAN:
10296                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10297                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10298                                 mirr_rule->entries[j] =
10299                                         mirror_conf->vlan.vlan_id[i];
10300                                 j++;
10301                         }
10302                 }
10303                 if (j == 0) {
10304                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10305                         rte_free(mirr_rule);
10306                         return -EINVAL;
10307                 }
10308                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10309                 break;
10310         case ETH_MIRROR_VIRTUAL_POOL_UP:
10311         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10312                 /* check if the specified pool bit is out of range */
10313                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10314                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10315                         rte_free(mirr_rule);
10316                         return -EINVAL;
10317                 }
10318                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10319                         if (mirror_conf->pool_mask & (1ULL << i)) {
10320                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10321                                 j++;
10322                         }
10323                 }
10324                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10325                         /* add pf vsi to entries */
10326                         mirr_rule->entries[j] = pf->main_vsi_seid;
10327                         j++;
10328                 }
10329                 if (j == 0) {
10330                         PMD_DRV_LOG(ERR, "pool is not specified.");
10331                         rte_free(mirr_rule);
10332                         return -EINVAL;
10333                 }
10334                 /* egress/ingress in AQ commands are relative to the switch, not the port */
10335                 mirr_rule->rule_type =
10336                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10337                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10338                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10339                 break;
10340         case ETH_MIRROR_UPLINK_PORT:
10341                 /* egress/ingress in AQ commands are relative to the switch, not the port */
10342                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10343                 break;
10344         case ETH_MIRROR_DOWNLINK_PORT:
10345                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10346                 break;
10347         default:
10348                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10349                         mirror_conf->rule_type);
10350                 rte_free(mirr_rule);
10351                 return -EINVAL;
10352         }
10353
10354         /* If the dst_pool is equal to vf_num, consider it as PF */
10355         if (mirror_conf->dst_pool == pf->vf_num)
10356                 dst_seid = pf->main_vsi_seid;
10357         else
10358                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10359
10360         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10361                                       mirr_rule->rule_type, mirr_rule->entries,
10362                                       j, &rule_id);
10363         if (ret < 0) {
10364                 PMD_DRV_LOG(ERR,
10365                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10366                         ret, hw->aq.asq_last_status);
10367                 rte_free(mirr_rule);
10368                 return -ENOSYS;
10369         }
10370
10371         mirr_rule->index = sw_id;
10372         mirr_rule->num_entries = j;
10373         mirr_rule->id = rule_id;
10374         mirr_rule->dst_vsi_seid = dst_seid;
10375
10376         if (parent)
10377                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10378         else
10379                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10380
10381         pf->nb_mirror_rule++;
10382         return 0;
10383 }
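
/*
 * Illustrative sketch, compiled out: how an application of this DPDK era
 * would reach the mirror-rule path above through the generic ethdev
 * mirroring API. The function name and the VLAN/pool values are made-up
 * examples, not part of the driver.
 */
#if 0
static void
example_mirror_vlan(uint16_t port_id)
{
        struct rte_eth_mirror_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rule_type = ETH_MIRROR_VLAN;
        conf.vlan.vlan_mask = 1ULL << 0;  /* use the first VLAN slot */
        conf.vlan.vlan_id[0] = 100;       /* example VLAN ID to mirror */
        conf.dst_pool = 0;                /* mirror into pool/VF 0 */

        /* rule_id 0, on = 1: ends up in the switch statement above */
        rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
}
#endif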
10384
/**
 * i40e_mirror_rule_reset
 * @dev: pointer to the device
 * @sw_id: mirror rule's sw_id
 *
 * Remove an existing mirror rule.
 **/
10393 static int
10394 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10395 {
10396         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10397         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10398         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10399         uint16_t seid;
10400         int ret;
10401
10402         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10403
10404         seid = pf->main_vsi->veb->seid;
10405
10406         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10407                 if (sw_id == it->index) {
10408                         mirr_rule = it;
10409                         break;
10410                 }
10411         }
10412         if (mirr_rule) {
10413                 ret = i40e_aq_del_mirror_rule(hw, seid,
10414                                 mirr_rule->rule_type,
10415                                 mirr_rule->entries,
10416                                 mirr_rule->num_entries, mirr_rule->id);
10417                 if (ret < 0) {
10418                         PMD_DRV_LOG(ERR,
10419                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10420                                 ret, hw->aq.asq_last_status);
10421                         return -ENOSYS;
10422                 }
10423                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10424                 rte_free(mirr_rule);
10425                 pf->nb_mirror_rule--;
10426         } else {
10427                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10428                 return -ENOENT;
10429         }
10430         return 0;
10431 }
10432
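/*
 * Helpers to read the 64-bit timesync counters. Each value lives in a
 * low/high register pair; the low half is read first, and the high half
 * is assumed to be latched consistently by the hardware for that order.
 */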
10433 static uint64_t
10434 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10435 {
10436         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10437         uint64_t systim_cycles;
10438
10439         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10440         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10441                         << 32;
10442
10443         return systim_cycles;
10444 }
10445
10446 static uint64_t
10447 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10448 {
10449         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10450         uint64_t rx_tstamp;
10451
10452         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10453         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10454                         << 32;
10455
10456         return rx_tstamp;
10457 }
10458
10459 static uint64_t
10460 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10461 {
10462         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10463         uint64_t tx_tstamp;
10464
10465         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10466         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10467                         << 32;
10468
10469         return tx_tstamp;
10470 }
10471
10472 static void
10473 i40e_start_timecounters(struct rte_eth_dev *dev)
10474 {
10475         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10476         struct i40e_adapter *adapter =
10477                         (struct i40e_adapter *)dev->data->dev_private;
10478         struct rte_eth_link link;
10479         uint32_t tsync_inc_l;
10480         uint32_t tsync_inc_h;
10481
10482         /* Get current link speed. */
10483         i40e_dev_link_update(dev, 1);
10484         rte_eth_linkstatus_get(dev, &link);
10485
10486         switch (link.link_speed) {
10487         case ETH_SPEED_NUM_40G:
10488                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10489                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10490                 break;
10491         case ETH_SPEED_NUM_10G:
10492                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10493                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10494                 break;
10495         case ETH_SPEED_NUM_1G:
10496                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10497                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10498                 break;
10499         default:
10500                 tsync_inc_l = 0x0;
10501                 tsync_inc_h = 0x0;
10502         }
10503
10504         /* Set the timesync increment value. */
10505         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10506         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10507
10508         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10509         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10510         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10511
10512         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10513         adapter->systime_tc.cc_shift = 0;
10514         adapter->systime_tc.nsec_mask = 0;
10515
10516         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10517         adapter->rx_tstamp_tc.cc_shift = 0;
10518         adapter->rx_tstamp_tc.nsec_mask = 0;
10519
10520         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10521         adapter->tx_tstamp_tc.cc_shift = 0;
10522         adapter->tx_tstamp_tc.nsec_mask = 0;
10523 }
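
/*
 * Note: the hardware adds the INCVAL programmed above to PRTTSYN_TIME on
 * every internal clock tick, and the tick rate varies with link speed,
 * hence the per-speed constants. Since the timecounters use cc_shift == 0
 * and nsec_mask == 0, rte_timecounter_update() interprets raw counter
 * deltas directly as nanoseconds, which the INCVAL choice is assumed to
 * guarantee.
 */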
10524
10525 static int
10526 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10527 {
10528         struct i40e_adapter *adapter =
10529                         (struct i40e_adapter *)dev->data->dev_private;
10530
10531         adapter->systime_tc.nsec += delta;
10532         adapter->rx_tstamp_tc.nsec += delta;
10533         adapter->tx_tstamp_tc.nsec += delta;
10534
10535         return 0;
10536 }
10537
10538 static int
10539 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10540 {
10541         uint64_t ns;
10542         struct i40e_adapter *adapter =
10543                         (struct i40e_adapter *)dev->data->dev_private;
10544
10545         ns = rte_timespec_to_ns(ts);
10546
10547         /* Set the timecounters to a new value. */
10548         adapter->systime_tc.nsec = ns;
10549         adapter->rx_tstamp_tc.nsec = ns;
10550         adapter->tx_tstamp_tc.nsec = ns;
10551
10552         return 0;
10553 }
10554
10555 static int
10556 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10557 {
10558         uint64_t ns, systime_cycles;
10559         struct i40e_adapter *adapter =
10560                         (struct i40e_adapter *)dev->data->dev_private;
10561
10562         systime_cycles = i40e_read_systime_cyclecounter(dev);
10563         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10564         *ts = rte_ns_to_timespec(ns);
10565
10566         return 0;
10567 }
10568
10569 static int
10570 i40e_timesync_enable(struct rte_eth_dev *dev)
10571 {
10572         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10573         uint32_t tsync_ctl_l;
10574         uint32_t tsync_ctl_h;
10575
10576         /* Stop the timesync system time. */
10577         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10578         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10579         /* Reset the timesync system time value. */
10580         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10581         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10582
10583         i40e_start_timecounters(dev);
10584
10585         /* Clear timesync registers. */
10586         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10587         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10588         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10589         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10590         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10591         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10592
10593         /* Enable timestamping of PTP packets. */
10594         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10595         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10596
10597         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10598         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10599         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10600
10601         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10602         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10603
10604         return 0;
10605 }
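
/*
 * Illustrative sketch, compiled out: the generic ethdev timesync sequence
 * that exercises the handlers above. The function name is a made-up
 * example and all error handling is omitted.
 */
#if 0
static void
example_ptp_read(uint16_t port_id)
{
        struct timespec ts;

        rte_eth_timesync_enable(port_id);
        /* ... receive a packet carrying PKT_RX_IEEE1588_TMST ... */
        rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0 /* index */);
        rte_eth_timesync_read_time(port_id, &ts);
        rte_eth_timesync_disable(port_id);
}
#endif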
10606
10607 static int
10608 i40e_timesync_disable(struct rte_eth_dev *dev)
10609 {
10610         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10611         uint32_t tsync_ctl_l;
10612         uint32_t tsync_ctl_h;
10613
10614         /* Disable timestamping of transmitted PTP packets. */
10615         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10616         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10617
10618         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10619         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10620
10621         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10622         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10623
10624         /* Reset the timesync increment value. */
10625         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10626         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10627
10628         return 0;
10629 }
10630
10631 static int
10632 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10633                                 struct timespec *timestamp, uint32_t flags)
10634 {
10635         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10636         struct i40e_adapter *adapter =
10637                 (struct i40e_adapter *)dev->data->dev_private;
10638
10639         uint32_t sync_status;
10640         uint32_t index = flags & 0x03;
10641         uint64_t rx_tstamp_cycles;
10642         uint64_t ns;
10643
10644         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10645         if ((sync_status & (1 << index)) == 0)
10646                 return -EINVAL;
10647
10648         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10649         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10650         *timestamp = rte_ns_to_timespec(ns);
10651
10652         return 0;
10653 }
10654
10655 static int
10656 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10657                                 struct timespec *timestamp)
10658 {
10659         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10660         struct i40e_adapter *adapter =
10661                 (struct i40e_adapter *)dev->data->dev_private;
10662
10663         uint32_t sync_status;
10664         uint64_t tx_tstamp_cycles;
10665         uint64_t ns;
10666
10667         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10668         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10669                 return -EINVAL;
10670
10671         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10672         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10673         *timestamp = rte_ns_to_timespec(ns);
10674
10675         return 0;
10676 }
10677
/*
 * i40e_parse_dcb_configure - parse the DCB configuration from the user
 * @dev: the device being configured
 * @dcb_cfg: pointer to the parsed DCB configuration result
 * @tc_map: bit map of enabled traffic classes
 *
 * Returns 0 on success, negative value on failure
 */
10686 static int
10687 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10688                          struct i40e_dcbx_config *dcb_cfg,
10689                          uint8_t *tc_map)
10690 {
10691         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
        uint8_t i, tc_bw, bw_lf, nb_tcs;
10693
10694         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10695
10696         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10697         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
                PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
10699                 return -EINVAL;
10700         }
10701
        /* nb_tcs == 0 selects a single TC (TC0); guard the division below */
        nb_tcs = dcb_rx_conf->nb_tcs ? dcb_rx_conf->nb_tcs : 1;

        /* assume each TC gets the same BW share */
        tc_bw = I40E_MAX_PERCENT / nb_tcs;
        for (i = 0; i < nb_tcs; i++)
                dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
        /* distribute the remainder so the TC BW shares sum to 100 */
        bw_lf = I40E_MAX_PERCENT % nb_tcs;
        for (i = 0; i < bw_lf; i++)
                dcb_cfg->etscfg.tcbwtable[i]++;

        /* assume each TC uses the same Transmission Selection Algorithm */
        for (i = 0; i < nb_tcs; i++)
                dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10714
10715         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10716                 dcb_cfg->etscfg.prioritytable[i] =
10717                                 dcb_rx_conf->dcb_tc[i];
10718
10719         /* FW needs one App to configure HW */
10720         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10721         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10722         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10723         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10724
10725         if (dcb_rx_conf->nb_tcs == 0)
10726                 *tc_map = 1; /* tc0 only */
10727         else
10728                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10729
10730         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10731                 dcb_cfg->pfc.willing = 0;
10732                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10733                 dcb_cfg->pfc.pfcenable = *tc_map;
10734         }
10735         return 0;
10736 }
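
/*
 * Worked example for the BW split above: with nb_tcs == 8,
 * tc_bw = 100 / 8 = 12 and bw_lf = 100 % 8 = 4, so TC0-TC3 get 13% and
 * TC4-TC7 get 12%, keeping the sum at exactly 100.
 */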
10737
10738
10739 static enum i40e_status_code
10740 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10741                               struct i40e_aqc_vsi_properties_data *info,
10742                               uint8_t enabled_tcmap)
10743 {
10744         enum i40e_status_code ret;
10745         int i, total_tc = 0;
10746         uint16_t qpnum_per_tc, bsf, qp_idx;
10747         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10748         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10749         uint16_t used_queues;
10750
10751         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10752         if (ret != I40E_SUCCESS)
10753                 return ret;
10754
10755         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10756                 if (enabled_tcmap & (1 << i))
10757                         total_tc++;
10758         }
10759         if (total_tc == 0)
10760                 total_tc = 1;
10761         vsi->enabled_tc = enabled_tcmap;
10762
        /* different VSI types have different numbers of queues assigned */
10764         if (vsi->type == I40E_VSI_MAIN)
10765                 used_queues = dev_data->nb_rx_queues -
10766                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10767         else if (vsi->type == I40E_VSI_VMDQ2)
10768                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10769         else {
10770                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10771                 return I40E_ERR_NO_AVAILABLE_VSI;
10772         }
10773
        /* Number of queues per enabled TC */
        qpnum_per_tc = used_queues / total_tc;
        if (qpnum_per_tc == 0) {
                PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
                return I40E_ERR_INVALID_QP_ID;
        }
10780         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10781                                 I40E_MAX_Q_PER_TC);
10782         bsf = rte_bsf32(qpnum_per_tc);
10783
        /**
         * Configure TC and queue mapping parameters: for each enabled TC,
         * allocate qpnum_per_tc queues to it; disabled TCs are served by
         * the default queue.
         */
10789         qp_idx = 0;
10790         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10791                 if (vsi->enabled_tc & (1 << i)) {
10792                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10793                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10794                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10795                         qp_idx += qpnum_per_tc;
10796                 } else
10797                         info->tc_mapping[i] = 0;
10798         }
10799
        /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10801         if (vsi->type == I40E_VSI_SRIOV) {
10802                 info->mapping_flags |=
10803                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10804                 for (i = 0; i < vsi->nb_qps; i++)
10805                         info->queue_mapping[i] =
10806                                 rte_cpu_to_le_16(vsi->base_queue + i);
10807         } else {
10808                 info->mapping_flags |=
10809                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10810                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10811         }
10812         info->valid_sections |=
10813                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10814
10815         return I40E_SUCCESS;
10816 }
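
/*
 * Example of the tc_mapping encoding produced above: with 8 usable queues
 * and two enabled TCs, qpnum_per_tc = 4 and bsf = 2, so TC0 is encoded as
 * (offset 0, 1 << 2 queues) and TC1 as (offset 4, 1 << 2 queues), i.e.
 * queues 0-3 and 4-7.
 */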
10817
10818 /*
10819  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10820  * @veb: VEB to be configured
10821  * @tc_map: enabled TC bitmap
10822  *
10823  * Returns 0 on success, negative value on failure
10824  */
10825 static enum i40e_status_code
10826 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10827 {
10828         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10829         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10830         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10831         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10832         enum i40e_status_code ret = I40E_SUCCESS;
10833         int i;
10834         uint32_t bw_max;
10835
        /* Nothing to do if the requested TC map is already enabled */
10837         if (veb->enabled_tc == tc_map)
10838                 return ret;
10839
10840         /* configure tc bandwidth */
10841         memset(&veb_bw, 0, sizeof(veb_bw));
10842         veb_bw.tc_valid_bits = tc_map;
10843         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10844         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10845                 if (tc_map & BIT_ULL(i))
10846                         veb_bw.tc_bw_share_credits[i] = 1;
10847         }
10848         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10849                                                    &veb_bw, NULL);
10850         if (ret) {
10851                 PMD_INIT_LOG(ERR,
10852                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10853                         hw->aq.asq_last_status);
10854                 return ret;
10855         }
10856
10857         memset(&ets_query, 0, sizeof(ets_query));
10858         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10859                                                    &ets_query, NULL);
10860         if (ret != I40E_SUCCESS) {
10861                 PMD_DRV_LOG(ERR,
10862                         "Failed to get switch_comp ETS configuration %u",
10863                         hw->aq.asq_last_status);
10864                 return ret;
10865         }
10866         memset(&bw_query, 0, sizeof(bw_query));
10867         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10868                                                   &bw_query, NULL);
10869         if (ret != I40E_SUCCESS) {
10870                 PMD_DRV_LOG(ERR,
10871                         "Failed to get switch_comp bandwidth configuration %u",
10872                         hw->aq.asq_last_status);
10873                 return ret;
10874         }
10875
10876         /* store and print out BW info */
10877         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10878         veb->bw_info.bw_max = ets_query.tc_bw_max;
10879         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10880         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10881         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10882                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10883                      I40E_16_BIT_WIDTH);
10884         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10885                 veb->bw_info.bw_ets_share_credits[i] =
10886                                 bw_query.tc_bw_share_credits[i];
10887                 veb->bw_info.bw_ets_credits[i] =
10888                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10889                 /* 4 bits per TC, 4th bit is reserved */
10890                 veb->bw_info.bw_ets_max[i] =
10891                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10892                                   RTE_LEN2MASK(3, uint8_t));
10893                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10894                             veb->bw_info.bw_ets_share_credits[i]);
10895                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10896                             veb->bw_info.bw_ets_credits[i]);
10897                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10898                             veb->bw_info.bw_ets_max[i]);
10899         }
10900
10901         veb->enabled_tc = tc_map;
10902
10903         return ret;
10904 }
10905
10906
10907 /*
10908  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10909  * @vsi: VSI to be configured
10910  * @tc_map: enabled TC bitmap
10911  *
10912  * Returns 0 on success, negative value on failure
10913  */
10914 static enum i40e_status_code
10915 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10916 {
10917         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10918         struct i40e_vsi_context ctxt;
10919         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10920         enum i40e_status_code ret = I40E_SUCCESS;
10921         int i;
10922
        /* Nothing to do if the requested TC map is already enabled */
10924         if (vsi->enabled_tc == tc_map)
10925                 return ret;
10926
10927         /* configure tc bandwidth */
10928         memset(&bw_data, 0, sizeof(bw_data));
10929         bw_data.tc_valid_bits = tc_map;
10930         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10931         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10932                 if (tc_map & BIT_ULL(i))
10933                         bw_data.tc_bw_credits[i] = 1;
10934         }
10935         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10936         if (ret) {
10937                 PMD_INIT_LOG(ERR,
10938                         "AQ command Config VSI BW allocation per TC failed = %d",
10939                         hw->aq.asq_last_status);
10940                 goto out;
10941         }
10942         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10943                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10944
10945         /* Update Queue Pairs Mapping for currently enabled UPs */
10946         ctxt.seid = vsi->seid;
10947         ctxt.pf_num = hw->pf_id;
10948         ctxt.vf_num = 0;
10949         ctxt.uplink_seid = vsi->uplink_seid;
10950         ctxt.info = vsi->info;
10951         i40e_get_cap(hw);
10952         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10953         if (ret)
10954                 goto out;
10955
10956         /* Update the VSI after updating the VSI queue-mapping information */
10957         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10958         if (ret) {
10959                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10960                         hw->aq.asq_last_status);
10961                 goto out;
10962         }
10963         /* update the local VSI info with updated queue map */
10964         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10965                                         sizeof(vsi->info.tc_mapping));
10966         rte_memcpy(&vsi->info.queue_mapping,
10967                         &ctxt.info.queue_mapping,
10968                 sizeof(vsi->info.queue_mapping));
10969         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10970         vsi->info.valid_sections = 0;
10971
10972         /* query and update current VSI BW information */
10973         ret = i40e_vsi_get_bw_config(vsi);
10974         if (ret) {
10975                 PMD_INIT_LOG(ERR,
10976                          "Failed updating vsi bw info, err %s aq_err %s",
10977                          i40e_stat_str(hw, ret),
10978                          i40e_aq_str(hw, hw->aq.asq_last_status));
10979                 goto out;
10980         }
10981
10982         vsi->enabled_tc = tc_map;
10983
10984 out:
10985         return ret;
10986 }
10987
10988 /*
10989  * i40e_dcb_hw_configure - program the dcb setting to hw
10990  * @pf: pf the configuration is taken on
10991  * @new_cfg: new configuration
10992  * @tc_map: enabled TC bitmap
10993  *
10994  * Returns 0 on success, negative value on failure
10995  */
10996 static enum i40e_status_code
10997 i40e_dcb_hw_configure(struct i40e_pf *pf,
10998                       struct i40e_dcbx_config *new_cfg,
10999                       uint8_t tc_map)
11000 {
11001         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11002         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11003         struct i40e_vsi *main_vsi = pf->main_vsi;
11004         struct i40e_vsi_list *vsi_list;
11005         enum i40e_status_code ret;
11006         int i;
11007         uint32_t val;
11008
        /* Use the FW LLDP API only if FW >= v4.4 */
11010         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11011               (hw->aq.fw_maj_ver >= 5))) {
11012                 PMD_INIT_LOG(ERR,
                        "FW < v4.4, cannot use FW LLDP API to configure DCB");
11014                 return I40E_ERR_FIRMWARE_API_VERSION;
11015         }
11016
        /* Check whether reconfiguration is needed */
11018         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
                PMD_INIT_LOG(ERR, "No change in DCB config required.");
11020                 return I40E_SUCCESS;
11021         }
11022
11023         /* Copy the new config to the current config */
11024         *old_cfg = *new_cfg;
11025         old_cfg->etsrec = old_cfg->etscfg;
11026         ret = i40e_set_dcb_config(hw);
11027         if (ret) {
11028                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11029                          i40e_stat_str(hw, ret),
11030                          i40e_aq_str(hw, hw->aq.asq_last_status));
11031                 return ret;
11032         }
11033         /* set receive Arbiter to RR mode and ETS scheme by default */
11034         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11035                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11036                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11037                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
                         I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11039                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11040                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11041                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11042                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11043                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11044                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11045                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11046                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11047         }
11048         /* get local mib to check whether it is configured correctly */
11049         /* IEEE mode */
11050         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11051         /* Get Local DCB Config */
11052         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11053                                      &hw->local_dcbx_config);
11054
        /* If a VEB has been created, update its TC setting first */
11056         if (main_vsi->veb) {
11057                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11058                 if (ret)
11059                         PMD_INIT_LOG(WARNING,
11060                                  "Failed configuring TC for VEB seid=%d",
11061                                  main_vsi->veb->seid);
11062         }
11063         /* Update each VSI */
11064         i40e_vsi_config_tc(main_vsi, tc_map);
11065         if (main_vsi->veb) {
11066                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
                        /* Besides the main VSI and VMDQ VSIs, only enable
                         * the default TC for other VSIs
                         */
11070                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11071                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11072                                                          tc_map);
11073                         else
11074                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11075                                                          I40E_DEFAULT_TCMAP);
11076                         if (ret)
11077                                 PMD_INIT_LOG(WARNING,
11078                                         "Failed configuring TC for VSI seid=%d",
11079                                         vsi_list->vsi->seid);
11080                         /* continue */
11081                 }
11082         }
11083         return I40E_SUCCESS;
11084 }
11085
11086 /*
 * i40e_dcb_init_configure - initialize DCB configuration
 * @dev: device being configured
 * @sw_dcb: indicates whether DCB is configured in software or offloaded to HW
11090  *
11091  * Returns 0 on success, negative value on failure
11092  */
11093 int
11094 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11095 {
11096         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11097         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11098         int i, ret = 0;
11099
11100         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11101                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11102                 return -ENOTSUP;
11103         }
11104
11105         /* DCB initialization:
11106          * Update DCB configuration from the Firmware and configure
11107          * LLDP MIB change event.
11108          */
11109         if (sw_dcb == TRUE) {
11110                 ret = i40e_init_dcb(hw);
                /* If the LLDP agent is stopped, i40e_init_dcb is expected
                 * to fail with an I40E_AQ_RC_EPERM adminq status;
                 * otherwise it should succeed.
                 */
                if (ret == I40E_SUCCESS ||
                    hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
11117                         memset(&hw->local_dcbx_config, 0,
11118                                 sizeof(struct i40e_dcbx_config));
11119                         /* set dcb default configuration */
11120                         hw->local_dcbx_config.etscfg.willing = 0;
11121                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11122                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11123                         hw->local_dcbx_config.etscfg.tsatable[0] =
11124                                                 I40E_IEEE_TSA_ETS;
                        /* map all UPs to TC0 */
11126                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11127                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11128                         hw->local_dcbx_config.etsrec =
11129                                 hw->local_dcbx_config.etscfg;
11130                         hw->local_dcbx_config.pfc.willing = 0;
11131                         hw->local_dcbx_config.pfc.pfccap =
11132                                                 I40E_MAX_TRAFFIC_CLASS;
11133                         /* FW needs one App to configure HW */
11134                         hw->local_dcbx_config.numapps = 1;
11135                         hw->local_dcbx_config.app[0].selector =
11136                                                 I40E_APP_SEL_ETHTYPE;
11137                         hw->local_dcbx_config.app[0].priority = 3;
11138                         hw->local_dcbx_config.app[0].protocolid =
11139                                                 I40E_APP_PROTOID_FCOE;
11140                         ret = i40e_set_dcb_config(hw);
11141                         if (ret) {
11142                                 PMD_INIT_LOG(ERR,
                                        "failed to set default DCB config, err = %d, aq_err = %d.",
11144                                         ret, hw->aq.asq_last_status);
11145                                 return -ENOSYS;
11146                         }
11147                 } else {
11148                         PMD_INIT_LOG(ERR,
                                "DCB initialization in FW failed, err = %d, aq_err = %d.",
11150                                 ret, hw->aq.asq_last_status);
11151                         return -ENOTSUP;
11152                 }
11153         } else {
11154                 ret = i40e_aq_start_lldp(hw, NULL);
11155                 if (ret != I40E_SUCCESS)
11156                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11157
11158                 ret = i40e_init_dcb(hw);
11159                 if (!ret) {
11160                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11161                                 PMD_INIT_LOG(ERR,
11162                                         "HW doesn't support DCBX offload.");
11163                                 return -ENOTSUP;
11164                         }
11165                 } else {
11166                         PMD_INIT_LOG(ERR,
11167                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11168                                 ret, hw->aq.asq_last_status);
11169                         return -ENOTSUP;
11170                 }
11171         }
11172         return 0;
11173 }
11174
11175 /*
 * i40e_dcb_setup - set up DCB-related configuration
11177  * @dev: device being configured
11178  *
11179  * Returns 0 on success, negative value on failure
11180  */
11181 static int
11182 i40e_dcb_setup(struct rte_eth_dev *dev)
11183 {
11184         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11185         struct i40e_dcbx_config dcb_cfg;
11186         uint8_t tc_map = 0;
11187         int ret = 0;
11188
11189         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11190                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11191                 return -ENOTSUP;
11192         }
11193
11194         if (pf->vf_num != 0)
                PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
11196
11197         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11198         if (ret) {
11199                 PMD_INIT_LOG(ERR, "invalid dcb config");
11200                 return -EINVAL;
11201         }
11202         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11203         if (ret) {
                PMD_INIT_LOG(ERR, "DCB sw configuration failed");
11205                 return -ENOSYS;
11206         }
11207
11208         return 0;
11209 }
11210
11211 static int
11212 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11213                       struct rte_eth_dcb_info *dcb_info)
11214 {
11215         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11216         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11217         struct i40e_vsi *vsi = pf->main_vsi;
11218         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11219         uint16_t bsf, tc_mapping;
11220         int i, j = 0;
11221
11222         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11223                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11224         else
11225                 dcb_info->nb_tcs = 1;
11226         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11227                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11228         for (i = 0; i < dcb_info->nb_tcs; i++)
11229                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11230
11231         /* get queue mapping if vmdq is disabled */
11232         if (!pf->nb_cfg_vmdq_vsi) {
11233                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11234                         if (!(vsi->enabled_tc & (1 << i)))
11235                                 continue;
11236                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11237                         dcb_info->tc_queue.tc_rxq[j][i].base =
11238                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11239                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11240                         dcb_info->tc_queue.tc_txq[j][i].base =
11241                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11242                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11243                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11244                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11245                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11246                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11247                 }
11248                 return 0;
11249         }
11250
11251         /* get queue mapping if vmdq is enabled */
11252         do {
11253                 vsi = pf->vmdq[j].vsi;
11254                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11255                         if (!(vsi->enabled_tc & (1 << i)))
11256                                 continue;
11257                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11258                         dcb_info->tc_queue.tc_rxq[j][i].base =
11259                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11260                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11261                         dcb_info->tc_queue.tc_txq[j][i].base =
11262                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11263                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11264                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11265                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11266                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11267                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11268                 }
11269                 j++;
11270         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11271         return 0;
11272 }
11273
11274 static int
11275 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11276 {
11277         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11278         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11279         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11280         uint16_t msix_intr;
11281
11282         msix_intr = intr_handle->intr_vec[queue_id];
11283         if (msix_intr == I40E_MISC_VEC_ID)
11284                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11285                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11286                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11287                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11288         else
11289                 I40E_WRITE_REG(hw,
11290                                I40E_PFINT_DYN_CTLN(msix_intr -
11291                                                    I40E_RX_VEC_START),
11292                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11293                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11294                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11295
11296         I40E_WRITE_FLUSH(hw);
11297         rte_intr_enable(&pci_dev->intr_handle);
11298
11299         return 0;
11300 }
11301
11302 static int
11303 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11304 {
11305         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11306         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11307         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11308         uint16_t msix_intr;
11309
11310         msix_intr = intr_handle->intr_vec[queue_id];
11311         if (msix_intr == I40E_MISC_VEC_ID)
11312                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11313                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11314         else
11315                 I40E_WRITE_REG(hw,
11316                                I40E_PFINT_DYN_CTLN(msix_intr -
11317                                                    I40E_RX_VEC_START),
11318                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11319         I40E_WRITE_FLUSH(hw);
11320
11321         return 0;
11322 }
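
/*
 * Illustrative sketch, compiled out: how an application toggles the two
 * queue-interrupt handlers above around an event wait. The epoll wiring
 * through rte_epoll_wait() is elided; the function name is a made-up
 * example.
 */
#if 0
static void
example_rxq_wait(uint16_t port_id, uint16_t queue_id)
{
        /* unmask the queue vector, then sleep until it fires */
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        /* ... rte_epoll_wait() on the queue's event fd ... */
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
        /* ... drain the queue with rte_eth_rx_burst() ... */
}
#endif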
11323
11324 static int i40e_get_regs(struct rte_eth_dev *dev,
11325                          struct rte_dev_reg_info *regs)
11326 {
11327         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11328         uint32_t *ptr_data = regs->data;
11329         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11330         const struct i40e_reg_info *reg_info;
11331
11332         if (ptr_data == NULL) {
11333                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11334                 regs->width = sizeof(uint32_t);
11335                 return 0;
11336         }
11337
11338         /* The first few registers have to be read using AQ operations */
11339         reg_idx = 0;
11340         while (i40e_regs_adminq[reg_idx].name) {
11341                 reg_info = &i40e_regs_adminq[reg_idx++];
11342                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11343                         for (arr_idx2 = 0;
11344                                         arr_idx2 <= reg_info->count2;
11345                                         arr_idx2++) {
11346                                 reg_offset = arr_idx * reg_info->stride1 +
11347                                         arr_idx2 * reg_info->stride2;
11348                                 reg_offset += reg_info->base_addr;
11349                                 ptr_data[reg_offset >> 2] =
11350                                         i40e_read_rx_ctl(hw, reg_offset);
11351                         }
11352         }
11353
11354         /* The remaining registers can be read using primitives */
11355         reg_idx = 0;
11356         while (i40e_regs_others[reg_idx].name) {
11357                 reg_info = &i40e_regs_others[reg_idx++];
11358                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11359                         for (arr_idx2 = 0;
11360                                         arr_idx2 <= reg_info->count2;
11361                                         arr_idx2++) {
11362                                 reg_offset = arr_idx * reg_info->stride1 +
11363                                         arr_idx2 * reg_info->stride2;
11364                                 reg_offset += reg_info->base_addr;
11365                                 ptr_data[reg_offset >> 2] =
11366                                         I40E_READ_REG(hw, reg_offset);
11367                         }
11368         }
11369
11370         return 0;
11371 }
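
/*
 * Illustrative sketch, compiled out: the two-pass protocol i40e_get_regs()
 * implements -- a first call with data == NULL only reports the size, a
 * second call fills the buffer. The function name is a made-up example and
 * the buffer is sized generously from length * width.
 */
#if 0
static void
example_reg_dump(uint16_t port_id)
{
        struct rte_dev_reg_info regs;

        memset(&regs, 0, sizeof(regs));
        if (rte_eth_dev_get_reg_info(port_id, &regs) != 0)
                return;         /* first pass fills regs.length/regs.width */
        regs.data = rte_zmalloc("regs_dump", regs.length * regs.width, 0);
        if (regs.data != NULL) {
                rte_eth_dev_get_reg_info(port_id, &regs);
                rte_free(regs.data);
        }
}
#endif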
11372
11373 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11374 {
11375         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11376
11377         /* Convert word count to byte count */
11378         return hw->nvm.sr_size << 1;
11379 }
11380
11381 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11382                            struct rte_dev_eeprom_info *eeprom)
11383 {
11384         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11385         uint16_t *data = eeprom->data;
11386         uint16_t offset, length, cnt_words;
11387         int ret_code;
11388
11389         offset = eeprom->offset >> 1;
11390         length = eeprom->length >> 1;
11391         cnt_words = length;
11392
11393         if (offset > hw->nvm.sr_size ||
11394                 offset + length > hw->nvm.sr_size) {
11395                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11396                 return -EINVAL;
11397         }
11398
11399         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11400
11401         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11402         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11403                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11404                 return -EIO;
11405         }
11406
11407         return 0;
11408 }
11409
11410 static int i40e_get_module_info(struct rte_eth_dev *dev,
11411                                 struct rte_eth_dev_module_info *modinfo)
11412 {
11413         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11414         uint32_t sff8472_comp = 0;
11415         uint32_t sff8472_swap = 0;
11416         uint32_t sff8636_rev = 0;
11417         i40e_status status;
11418         uint32_t type = 0;
11419
11420         /* Check if firmware supports reading module EEPROM. */
11421         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11422                 PMD_DRV_LOG(ERR,
11423                             "Module EEPROM memory read not supported. "
11424                             "Please update the NVM image.\n");
11425                 return -EINVAL;
11426         }
11427
11428         status = i40e_update_link_info(hw);
11429         if (status)
11430                 return -EIO;
11431
11432         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11433                 PMD_DRV_LOG(ERR,
11434                             "Cannot read module EEPROM memory. "
11435                             "No module connected.\n");
11436                 return -EINVAL;
11437         }
11438
11439         type = hw->phy.link_info.module_type[0];
11440
11441         switch (type) {
11442         case I40E_MODULE_TYPE_SFP:
11443                 status = i40e_aq_get_phy_register(hw,
11444                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11445                                 I40E_I2C_EEPROM_DEV_ADDR,
11446                                 I40E_MODULE_SFF_8472_COMP,
11447                                 &sff8472_comp, NULL);
11448                 if (status)
11449                         return -EIO;
11450
11451                 status = i40e_aq_get_phy_register(hw,
11452                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11453                                 I40E_I2C_EEPROM_DEV_ADDR,
11454                                 I40E_MODULE_SFF_8472_SWAP,
11455                                 &sff8472_swap, NULL);
11456                 if (status)
11457                         return -EIO;
11458
11459                 /* Check if the module requires address swap to access
11460                  * the other EEPROM memory page.
11461                  */
11462                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11463                         PMD_DRV_LOG(WARNING,
11464                                     "Module address swap to access "
11465                                     "page 0xA2 is not supported.\n");
11466                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11467                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11468                 } else if (sff8472_comp == 0x00) {
11469                         /* Module is not SFF-8472 compliant */
11470                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11471                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11472                 } else {
11473                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
11474                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11475                 }
11476                 break;
11477         case I40E_MODULE_TYPE_QSFP_PLUS:
11478                 /* Read from memory page 0. */
11479                 status = i40e_aq_get_phy_register(hw,
11480                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11481                                 0,
11482                                 I40E_MODULE_REVISION_ADDR,
11483                                 &sff8636_rev, NULL);
11484                 if (status)
11485                         return -EIO;
11486                 /* Determine revision compliance byte */
11487                 if (sff8636_rev > 0x02) {
11488                         /* Module is SFF-8636 compliant */
11489                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
11490                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11491                 } else {
11492                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
11493                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11494                 }
11495                 break;
11496         case I40E_MODULE_TYPE_QSFP28:
11497                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11498                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11499                 break;
11500         default:
11501                 PMD_DRV_LOG(ERR, "Module type unrecognized\n");
11502                 return -EINVAL;
11503         }
11504         return 0;
11505 }
11506
11507 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11508                                   struct rte_dev_eeprom_info *info)
11509 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        bool is_sfp = false;
        i40e_status status;
        uint8_t *data;
        uint32_t value = 0;
        uint32_t i;

        /* Validate the request before dereferencing info */
        if (!info || !info->length || !info->data)
                return -EINVAL;
        data = info->data;
11519
11520         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11521                 is_sfp = true;
11522
11523         for (i = 0; i < info->length; i++) {
11524                 u32 offset = i + info->offset;
11525                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11526
11527                 /* Check if we need to access the other memory page */
11528                 if (is_sfp) {
11529                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11530                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11531                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11532                         }
11533                 } else {
11534                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11535                                 /* Compute memory page number and offset. */
11536                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11537                                 addr++;
11538                         }
11539                 }
11540                 status = i40e_aq_get_phy_register(hw,
11541                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11542                                 addr, offset, &value, NULL);
11543                 if (status)
11544                         return -EIO;
11545                 data[i] = (uint8_t)value;
11546         }
11547         return 0;
11548 }
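
/*
 * Illustrative sketch, compiled out: dumping a module EEPROM through the
 * two handlers above via the generic ethdev API. The function name and
 * buffer size are made-up examples.
 */
#if 0
static void
example_module_dump(uint16_t port_id)
{
        struct rte_eth_dev_module_info modinfo;
        struct rte_dev_eeprom_info info;
        uint8_t buf[RTE_ETH_MODULE_SFF_8472_LEN];

        if (rte_eth_dev_get_module_info(port_id, &modinfo) != 0)
                return;
        memset(&info, 0, sizeof(info));
        info.offset = 0;
        info.length = RTE_MIN(modinfo.eeprom_len, (uint32_t)sizeof(buf));
        info.data = buf;
        rte_eth_dev_get_module_eeprom(port_id, &info);
}
#endif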
11549
11550 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11551                                      struct ether_addr *mac_addr)
11552 {
11553         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11554         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11555         struct i40e_vsi *vsi = pf->main_vsi;
11556         struct i40e_mac_filter_info mac_filter;
11557         struct i40e_mac_filter *f;
11558         int ret;
11559
11560         if (!is_valid_assigned_ether_addr(mac_addr)) {
11561                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11562                 return -EINVAL;
11563         }
11564
11565         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11566                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11567                         break;
11568         }
11569
11570         if (f == NULL) {
11571                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11572                 return -EIO;
11573         }
11574
11575         mac_filter = f->mac_info;
11576         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11577         if (ret != I40E_SUCCESS) {
11578                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11579                 return -EIO;
11580         }
11581         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11582         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11583         if (ret != I40E_SUCCESS) {
11584                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11585                 return -EIO;
11586         }
11587         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11588
11589         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11590                                         mac_addr->addr_bytes, NULL);
11591         if (ret != I40E_SUCCESS) {
11592                 PMD_DRV_LOG(ERR, "Failed to change mac");
11593                 return -EIO;
11594         }
11595
11596         return 0;
11597 }
11598
11599 static int
11600 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11601 {
11602         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11603         struct rte_eth_dev_data *dev_data = pf->dev_data;
11604         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11605         int ret = 0;
11606
11607         /* check if mtu is within the allowed range */
11608         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11609                 return -EINVAL;
11610
        /* MTU cannot be changed while the port is started */
11612         if (dev_data->dev_started) {
11613                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11614                             dev_data->port_id);
11615                 return -EBUSY;
11616         }
11617
11618         if (frame_size > ETHER_MAX_LEN)
11619                 dev_data->dev_conf.rxmode.offloads |=
11620                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11621         else
11622                 dev_data->dev_conf.rxmode.offloads &=
11623                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11624
11625         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11626
11627         return ret;
11628 }
11629
11630 /* Restore ethertype filter */
11631 static void
11632 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11633 {
11634         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11635         struct i40e_ethertype_filter_list
11636                 *ethertype_list = &pf->ethertype.ethertype_list;
11637         struct i40e_ethertype_filter *f;
11638         struct i40e_control_filter_stats stats = {0}; /* logged below even if the list is empty */
11639         uint16_t flags;
11640
11641         TAILQ_FOREACH(f, ethertype_list, rules) {
11642                 flags = 0;
11643                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11644                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11645                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11646                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11647                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11648
11649                 memset(&stats, 0, sizeof(stats));
11650                 i40e_aq_add_rem_control_packet_filter(hw,
11651                                             f->input.mac_addr.addr_bytes,
11652                                             f->input.ether_type,
11653                                             flags, pf->main_vsi->seid,
11654                                             f->queue, 1, &stats, NULL);
11655         }
11656         PMD_DRV_LOG(INFO, "Ethertype filter:"
11657                     " mac_etype_used = %u, etype_used = %u,"
11658                     " mac_etype_free = %u, etype_free = %u",
11659                     stats.mac_etype_used, stats.etype_used,
11660                     stats.mac_etype_free, stats.etype_free);
11661 }
11662
11663 /* Restore tunnel filter */
11664 static void
11665 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11666 {
11667         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11668         struct i40e_vsi *vsi;
11669         struct i40e_pf_vf *vf;
11670         struct i40e_tunnel_filter_list
11671                 *tunnel_list = &pf->tunnel.tunnel_list;
11672         struct i40e_tunnel_filter *f;
11673         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11674         bool big_buffer = false;
11675
11676         TAILQ_FOREACH(f, tunnel_list, rules) {
11677                 if (!f->is_to_vf)
11678                         vsi = pf->main_vsi;
11679                 else {
11680                         vf = &pf->vfs[f->vf_id];
11681                         vsi = vf->vsi;
11682                 }
11683                 memset(&cld_filter, 0, sizeof(cld_filter));
11684                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11685                         (struct ether_addr *)&cld_filter.element.outer_mac);
11686                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11687                         (struct ether_addr *)&cld_filter.element.inner_mac);
11688                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11689                 cld_filter.element.flags = f->input.flags;
11690                 cld_filter.element.tenant_id = f->input.tenant_id;
11691                 cld_filter.element.queue_number = f->queue;
11692                 rte_memcpy(cld_filter.general_fields,
11693                            f->input.general_fields,
11694                            sizeof(f->input.general_fields));
11695
11696                 /* recomputed per filter so the flag is not sticky */
11697                 big_buffer = ((f->input.flags &
11698                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11699                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11700                     ((f->input.flags &
11701                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11702                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11703                     ((f->input.flags &
11704                      I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11705                      I40E_AQC_ADD_CLOUD_FILTER_0X10);
11706
11707                 if (big_buffer)
11708                         i40e_aq_add_cloud_filters_big_buffer(hw,
11709                                              vsi->seid, &cld_filter, 1);
11710                 else
11711                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11712                                                   &cld_filter.element, 1);
11713         }
11714 }
11715
11716 /* Restore RSS filter */
11717 static inline void
11718 i40e_rss_filter_restore(struct i40e_pf *pf)
11719 {
11720         struct i40e_rte_flow_rss_conf *conf =
11721                                         &pf->rss_info;
11722         if (conf->conf.queue_num)
11723                 i40e_config_rss_filter(pf, conf, TRUE);
11724 }
11725
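      /*
       * Replay every software-tracked filter into the hardware, e.g. after
       * a device start or reset has cleared the on-chip tables.
       */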
11726 static void
11727 i40e_filter_restore(struct i40e_pf *pf)
11728 {
11729         i40e_ethertype_filter_restore(pf);
11730         i40e_tunnel_filter_restore(pf);
11731         i40e_fdir_filter_restore(pf);
11732         i40e_rss_filter_restore(pf);
11733 }
11734
11735 static bool
11736 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11737 {
11738         if (strcmp(dev->device->driver->name, drv->driver.name))
11739                 return false;
11740
11741         return true;
11742 }
11743
11744 bool
11745 is_i40e_supported(struct rte_eth_dev *dev)
11746 {
11747         return is_device_supported(dev, &rte_i40e_pmd);
11748 }
11749
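      /* Look up a customized (DDP package defined) pctype slot by index;
       * returns NULL if that index was never registered.
       */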
11750 struct i40e_customized_pctype*
11751 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11752 {
11753         int i;
11754
11755         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11756                 if (pf->customized_pctype[i].index == index)
11757                         return &pf->customized_pctype[i];
11758         }
11759         return NULL;
11760 }
11761
11762 static int
11763 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11764                               uint32_t pkg_size, uint32_t proto_num,
11765                               struct rte_pmd_i40e_proto_info *proto,
11766                               enum rte_pmd_i40e_package_op op)
11767 {
11768         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11769         uint32_t pctype_num;
11770         struct rte_pmd_i40e_ptype_info *pctype;
11771         uint32_t buff_size;
11772         struct i40e_customized_pctype *new_pctype = NULL;
11773         uint8_t proto_id;
11774         uint8_t pctype_value;
11775         char name[64];
11776         uint32_t i, j, n;
11777         int ret;
11778
11779         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11780             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11781                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11782                 return -1;
11783         }
11784
11785         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11786                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11787                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11788         if (ret) {
11789                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11790                 return -1;
11791         }
11792         if (!pctype_num) {
11793                 PMD_DRV_LOG(INFO, "No new pctype added");
11794                 return -1;
11795         }
11796
11797         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info); /* list entries are ptype_info */
11798         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11799         if (!pctype) {
11800                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11801                 return -1;
11802         }
11803         /* get information about new pctype list */
11804         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11805                                         (uint8_t *)pctype, buff_size,
11806                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11807         if (ret) {
11808                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11809                 rte_free(pctype);
11810                 return -1;
11811         }
11812
11813         /* Update customized pctype. */
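              /*
               * The pctype name is rebuilt by joining the package's protocol
               * names with '_': e.g. a pctype whose protocol list is GTPU
               * followed by IPV4 becomes "GTPU_IPV4", which is then matched
               * against the known customized pctype names below.
               */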
11814         for (i = 0; i < pctype_num; i++) {
11815                 pctype_value = pctype[i].ptype_id;
11816                 memset(name, 0, sizeof(name));
11817                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11818                         proto_id = pctype[i].protocols[j];
11819                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11820                                 continue;
11821                         for (n = 0; n < proto_num; n++) {
11822                                 if (proto[n].proto_id != proto_id)
11823                                         continue;
11824                                 strcat(name, proto[n].name);
11825                                 strcat(name, "_");
11826                                 break;
11827                         }
11828                 }
11829                 if (name[0]) name[strlen(name) - 1] = '\0'; /* drop trailing '_' */
11830                 if (!strcmp(name, "GTPC"))
11831                         new_pctype =
11832                                 i40e_find_customized_pctype(pf,
11833                                                       I40E_CUSTOMIZED_GTPC);
11834                 else if (!strcmp(name, "GTPU_IPV4"))
11835                         new_pctype =
11836                                 i40e_find_customized_pctype(pf,
11837                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11838                 else if (!strcmp(name, "GTPU_IPV6"))
11839                         new_pctype =
11840                                 i40e_find_customized_pctype(pf,
11841                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11842                 else if (!strcmp(name, "GTPU"))
11843                         new_pctype =
11844                                 i40e_find_customized_pctype(pf,
11845                                                       I40E_CUSTOMIZED_GTPU);
11846                 if (new_pctype) {
11847                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
11848                                 new_pctype->pctype = pctype_value;
11849                                 new_pctype->valid = true;
11850                         } else {
11851                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
11852                                 new_pctype->valid = false;
11853                         }
11854                 }
11855         }
11856
11857         rte_free(pctype);
11858         return 0;
11859 }
11860
11861 static int
11862 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11863                              uint32_t pkg_size, uint32_t proto_num,
11864                              struct rte_pmd_i40e_proto_info *proto,
11865                              enum rte_pmd_i40e_package_op op)
11866 {
11867         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11868         uint16_t port_id = dev->data->port_id;
11869         uint32_t ptype_num;
11870         struct rte_pmd_i40e_ptype_info *ptype;
11871         uint32_t buff_size;
11872         uint8_t proto_id;
11873         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11874         uint32_t i, j, n;
11875         bool in_tunnel;
11876         int ret;
11877
11878         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11879             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11880                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11881                 return -1;
11882         }
11883
11884         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
11885                 rte_pmd_i40e_ptype_mapping_reset(port_id);
11886                 return 0;
11887         }
11888
11889         /* get information about new ptype num */
11890         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11891                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11892                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11893         if (ret) {
11894                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11895                 return ret;
11896         }
11897         if (!ptype_num) {
11898                 PMD_DRV_LOG(INFO, "No new ptype added");
11899                 return -1;
11900         }
11901
11902         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11903         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11904         if (!ptype) {
11905                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11906                 return -1;
11907         }
11908
11909         /* get information about new ptype list */
11910         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11911                                         (uint8_t *)ptype, buff_size,
11912                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11913         if (ret) {
11914                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11915                 rte_free(ptype);
11916                 return ret;
11917         }
11918
11919         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11920         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11921         if (!ptype_mapping) {
11922                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11923                 rte_free(ptype);
11924                 return -1;
11925         }
11926
11927         /* Update ptype mapping table. */
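              /*
               * Each hardware ptype is translated into an rte_mbuf packet
               * type by OR-ing one flag per recognized protocol layer. An
               * outer IP header (OIPV4/OIPV6) or a tunnel protocol sets
               * in_tunnel, so every subsequent layer maps to its
               * RTE_PTYPE_INNER_* counterpart instead.
               */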
11928         for (i = 0; i < ptype_num; i++) {
11929                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11930                 ptype_mapping[i].sw_ptype = 0;
11931                 in_tunnel = false;
11932                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11933                         proto_id = ptype[i].protocols[j];
11934                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11935                                 continue;
11936                         for (n = 0; n < proto_num; n++) {
11937                                 if (proto[n].proto_id != proto_id)
11938                                         continue;
11939                                 memset(name, 0, sizeof(name));
11940                                 strcpy(name, proto[n].name);
11941                                 if (!strncasecmp(name, "PPPOE", 5))
11942                                         ptype_mapping[i].sw_ptype |=
11943                                                 RTE_PTYPE_L2_ETHER_PPPOE;
11944                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11945                                          !in_tunnel) {
11946                                         ptype_mapping[i].sw_ptype |=
11947                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11948                                         ptype_mapping[i].sw_ptype |=
11949                                                 RTE_PTYPE_L4_FRAG;
11950                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11951                                            in_tunnel) {
11952                                         ptype_mapping[i].sw_ptype |=
11953                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11954                                         ptype_mapping[i].sw_ptype |=
11955                                                 RTE_PTYPE_INNER_L4_FRAG;
11956                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
11957                                         ptype_mapping[i].sw_ptype |=
11958                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11959                                         in_tunnel = true;
11960                                 } else if (!strncasecmp(name, "IPV4", 4) &&
11961                                            !in_tunnel)
11962                                         ptype_mapping[i].sw_ptype |=
11963                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11964                                 else if (!strncasecmp(name, "IPV4", 4) &&
11965                                          in_tunnel)
11966                                         ptype_mapping[i].sw_ptype |=
11967                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11968                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11969                                          !in_tunnel) {
11970                                         ptype_mapping[i].sw_ptype |=
11971                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11972                                         ptype_mapping[i].sw_ptype |=
11973                                                 RTE_PTYPE_L4_FRAG;
11974                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11975                                            in_tunnel) {
11976                                         ptype_mapping[i].sw_ptype |=
11977                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11978                                         ptype_mapping[i].sw_ptype |=
11979                                                 RTE_PTYPE_INNER_L4_FRAG;
11980                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
11981                                         ptype_mapping[i].sw_ptype |=
11982                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11983                                         in_tunnel = true;
11984                                 } else if (!strncasecmp(name, "IPV6", 4) &&
11985                                            !in_tunnel)
11986                                         ptype_mapping[i].sw_ptype |=
11987                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11988                                 else if (!strncasecmp(name, "IPV6", 4) &&
11989                                          in_tunnel)
11990                                         ptype_mapping[i].sw_ptype |=
11991                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11992                                 else if (!strncasecmp(name, "UDP", 3) &&
11993                                          !in_tunnel)
11994                                         ptype_mapping[i].sw_ptype |=
11995                                                 RTE_PTYPE_L4_UDP;
11996                                 else if (!strncasecmp(name, "UDP", 3) &&
11997                                          in_tunnel)
11998                                         ptype_mapping[i].sw_ptype |=
11999                                                 RTE_PTYPE_INNER_L4_UDP;
12000                                 else if (!strncasecmp(name, "TCP", 3) &&
12001                                          !in_tunnel)
12002                                         ptype_mapping[i].sw_ptype |=
12003                                                 RTE_PTYPE_L4_TCP;
12004                                 else if (!strncasecmp(name, "TCP", 3) &&
12005                                          in_tunnel)
12006                                         ptype_mapping[i].sw_ptype |=
12007                                                 RTE_PTYPE_INNER_L4_TCP;
12008                                 else if (!strncasecmp(name, "SCTP", 4) &&
12009                                          !in_tunnel)
12010                                         ptype_mapping[i].sw_ptype |=
12011                                                 RTE_PTYPE_L4_SCTP;
12012                                 else if (!strncasecmp(name, "SCTP", 4) &&
12013                                          in_tunnel)
12014                                         ptype_mapping[i].sw_ptype |=
12015                                                 RTE_PTYPE_INNER_L4_SCTP;
12016                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12017                                           !strncasecmp(name, "ICMPV6", 6)) &&
12018                                          !in_tunnel)
12019                                         ptype_mapping[i].sw_ptype |=
12020                                                 RTE_PTYPE_L4_ICMP;
12021                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12022                                           !strncasecmp(name, "ICMPV6", 6)) &&
12023                                          in_tunnel)
12024                                         ptype_mapping[i].sw_ptype |=
12025                                                 RTE_PTYPE_INNER_L4_ICMP;
12026                                 else if (!strncasecmp(name, "GTPC", 4)) {
12027                                         ptype_mapping[i].sw_ptype |=
12028                                                 RTE_PTYPE_TUNNEL_GTPC;
12029                                         in_tunnel = true;
12030                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12031                                         ptype_mapping[i].sw_ptype |=
12032                                                 RTE_PTYPE_TUNNEL_GTPU;
12033                                         in_tunnel = true;
12034                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12035                                         ptype_mapping[i].sw_ptype |=
12036                                                 RTE_PTYPE_TUNNEL_GRENAT;
12037                                         in_tunnel = true;
12038                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
12039                                         ptype_mapping[i].sw_ptype |=
12040                                                 RTE_PTYPE_TUNNEL_L2TP;
12041                                         in_tunnel = true;
12042                                 }
12043
12044                                 break;
12045                         }
12046                 }
12047         }
12048
12049         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12050                                                 ptype_num, 0);
12051         if (ret)
12052                 PMD_DRV_LOG(ERR, "Failed to update mapping table.");
12053
12054         rte_free(ptype_mapping);
12055         rte_free(ptype);
12056         return ret;
12057 }
12058
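      /*
       * Refresh driver state that depends on a DDP package: the GTP support
       * flag, the customized pctypes and the ptype mapping table. op selects
       * whether the package is being added or removed.
       */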
12059 void
12060 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12061                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12062 {
12063         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12064         uint32_t proto_num;
12065         struct rte_pmd_i40e_proto_info *proto;
12066         uint32_t buff_size;
12067         uint32_t i;
12068         int ret;
12069
12070         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12071             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12072                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12073                 return;
12074         }
12075
12076         /* get information about protocol number */
12077         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12078                                        (uint8_t *)&proto_num, sizeof(proto_num),
12079                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12080         if (ret) {
12081                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12082                 return;
12083         }
12084         if (!proto_num) {
12085                 PMD_DRV_LOG(INFO, "No new protocol added");
12086                 return;
12087         }
12088
12089         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12090         proto = rte_zmalloc("new_proto", buff_size, 0);
12091         if (!proto) {
12092                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12093                 return;
12094         }
12095
12096         /* get information about protocol list */
12097         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12098                                         (uint8_t *)proto, buff_size,
12099                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12100         if (ret) {
12101                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12102                 rte_free(proto);
12103                 return;
12104         }
12105
12106         /* Check if GTP is supported. */
12107         for (i = 0; i < proto_num; i++) {
12108                 if (!strncmp(proto[i].name, "GTP", 3)) {
12109                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12110                                 pf->gtp_support = true;
12111                         else
12112                                 pf->gtp_support = false;
12113                         break;
12114                 }
12115         }
12116
12117         /* Update customized pctype info */
12118         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12119                                             proto_num, proto, op);
12120         if (ret)
12121                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12122
12123         /* Update customized ptype info */
12124         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12125                                            proto_num, proto, op);
12126         if (ret)
12127                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12128
12129         rte_free(proto);
12130 }
12131
12132 /* Create a QinQ cloud filter
12133  *
12134  * The Fortville NIC has limited resources for tunnel filters,
12135  * so we can only reuse existing filters.
12136  *
12137  * In step 1 we define which Field Vector fields can be used for
12138  * filter types.
12139  * As we do not have the inner tag defined as a field,
12140  * we have to define it first by reusing one of the L1 entries.
12141  *
12142  * In step 2 we replace one of the existing filter types with
12143  * a new one for QinQ.
12144  * As we are reusing L1 and replacing L2, some of the default filter
12145  * types will disappear; which ones depends on the L1 and L2 entries we reuse.
12146  *
12147  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12148  *
12149  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12150  *              later when we define the cloud filter.
12151  *      a.      Valid_flags.replace_cloud = 0
12152  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
12153  *      c.      New_filter = 0x10
12154  *      d.      TR bit = 0xff (optional, not used here)
12155  *      e.      Buffer – 2 entries:
12156  *              i.      Byte 0 = 8 (outer vlan FV index).
12157  *                      Byte 1 = 0 (rsv)
12158  *                      Byte 2-3 = 0x0fff
12159  *              ii.     Byte 0 = 37 (inner vlan FV index).
12160  *                      Byte 1 = 0 (rsv)
12161  *                      Byte 2-3 = 0x0fff
12162  *
12163  * Step 2:
12164  * 2.   Create cloud filter using two L1 filters entries: stag and
12165  *              new filter(outer vlan+ inner vlan)
12166  *      a.      Valid_flags.replace_cloud = 1
12167  *      b.      Old_filter = 1 (instead of outer IP)
12168  *      c.      New_filter = 0x10
12169  *      d.      Buffer – 2 entries:
12170  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
12171  *                      Byte 1-3 = 0 (rsv)
12172  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12173  *                      Byte 9-11 = 0 (rsv)
12174  */
12175 static int
12176 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12177 {
12178         int ret = -ENOTSUP;
12179         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12180         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12181         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12182
12183         if (pf->support_multi_driver) {
12184                 PMD_DRV_LOG(ERR, "Replacing cloud filters is not supported in multi-driver mode.");
12185                 return ret;
12186         }
12187
12188         /* Init */
12189         memset(&filter_replace, 0,
12190                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12191         memset(&filter_replace_buf, 0,
12192                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12193
12194         /* create L1 filter */
12195         filter_replace.old_filter_type =
12196                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12197         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12198         filter_replace.tr_bit = 0;
12199
12200         /* Prepare the buffer, 2 entries */
12201         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12202         filter_replace_buf.data[0] |=
12203                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12204         /* Field Vector 12b mask */
12205         filter_replace_buf.data[2] = 0xff;
12206         filter_replace_buf.data[3] = 0x0f;
12207         filter_replace_buf.data[4] =
12208                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12209         filter_replace_buf.data[4] |=
12210                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12211         /* Field Vector 12b mask */
12212         filter_replace_buf.data[6] = 0xff;
12213         filter_replace_buf.data[7] = 0x0f;
12214         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12215                         &filter_replace_buf);
12216         if (ret != I40E_SUCCESS)
12217                 return ret;
12218         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
12219                     "cloud l1 type is changed from 0x%x to 0x%x",
12220                     filter_replace.old_filter_type,
12221                     filter_replace.new_filter_type);
12222
12223         /* Apply the second L2 cloud filter */
12224         memset(&filter_replace, 0,
12225                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12226         memset(&filter_replace_buf, 0,
12227                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12228
12229         /* Create the L2 filter; its input will be the L1 filter created above */
12230         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12231         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12232         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12233
12234         /* Prepare the buffer, 2 entries */
12235         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12236         filter_replace_buf.data[0] |=
12237                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12238         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12239         filter_replace_buf.data[4] |=
12240                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12241         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12242                         &filter_replace_buf);
12243         if (!ret) {
12244                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
12245                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
12246                             "cloud filter type is changed from 0x%x to 0x%x",
12247                             filter_replace.old_filter_type,
12248                             filter_replace.new_filter_type);
12249         }
12250         return ret;
12251 }
12252
12253 int
12254 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
12255                    const struct rte_flow_action_rss *in)
12256 {
12257         if (in->key_len > RTE_DIM(out->key) ||
12258             in->queue_num > RTE_DIM(out->queue))
12259                 return -EINVAL;
12260         out->conf = (struct rte_flow_action_rss){
12261                 .func = in->func,
12262                 .level = in->level,
12263                 .types = in->types,
12264                 .key_len = in->key_len,
12265                 .queue_num = in->queue_num,
12266                 .key = in->key_len ? memcpy(out->key, in->key, in->key_len) : NULL,
12267                 .queue = memcpy(out->queue, in->queue,
12268                                 sizeof(*in->queue) * in->queue_num),
12269         };
12270         return 0;
12271 }
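
      /*
       * Usage sketch (illustrative only, not driver code): deep-copy an
       * application-supplied RSS action into driver-owned storage so the
       * caller's key and queue buffers may be freed afterwards.
       *
       *     const struct rte_flow_action_rss *in = ...; // from a flow action
       *     struct i40e_rte_flow_rss_conf out;
       *
       *     memset(&out, 0, sizeof(out));
       *     if (i40e_rss_conf_init(&out, in) != 0)
       *             return -EINVAL; // key or queue list exceeds storage
       */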
12272
12273 int
12274 i40e_action_rss_same(const struct rte_flow_action_rss *comp,
12275                      const struct rte_flow_action_rss *with)
12276 {
12277         return (comp->func == with->func &&
12278                 comp->level == with->level &&
12279                 comp->types == with->types &&
12280                 comp->key_len == with->key_len &&
12281                 comp->queue_num == with->queue_num &&
12282                 !memcmp(comp->key, with->key, with->key_len) &&
12283                 !memcmp(comp->queue, with->queue,
12284                         sizeof(*with->queue) * with->queue_num));
12285 }
12286
12287 int
12288 i40e_config_rss_filter(struct i40e_pf *pf,
12289                 struct i40e_rte_flow_rss_conf *conf, bool add)
12290 {
12291         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12292         uint32_t i, lut = 0;
12293         uint16_t j, num;
12294         struct rte_eth_rss_conf rss_conf = {
12295                 .rss_key = conf->conf.key_len ?
12296                         (void *)(uintptr_t)conf->conf.key : NULL,
12297                 .rss_key_len = conf->conf.key_len,
12298                 .rss_hf = conf->conf.types,
12299         };
12300         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12301
12302         if (!add) {
12303                 if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
12304                         i40e_pf_disable_rss(pf);
12305                         memset(rss_info, 0,
12306                                 sizeof(struct i40e_rte_flow_rss_conf));
12307                         return 0;
12308                 }
12309                 return -EINVAL;
12310         }
12311
12312         if (rss_info->conf.queue_num)
12313                 return -EINVAL;
12314
12315         /* If both VMDQ and RSS are enabled, not all PF queues are usable for
12316          * RSS, so compute how many PF queues are actually configured for it.
12317          */
12318         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12319                 num = i40e_pf_calc_configured_queues_num(pf);
12320         else
12321                 num = pf->dev_data->nb_rx_queues;
12322
12323         num = RTE_MIN(num, conf->conf.queue_num);
12324         PMD_DRV_LOG(INFO, "RSS is configured over at most %u contiguous PF queues",
12325                         num);
12326
12327         if (num == 0) {
12328                 PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
12329                 return -ENOTSUP;
12330         }
12331
12332         /* Fill in redirection table */
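              /*
               * Four 8-bit queue indices are accumulated into 'lut' and
               * flushed to one 32-bit I40E_PFQF_HLUT register every fourth
               * entry, assigning queues round-robin; e.g. with queues {2, 3}
               * the first register holds the sequence 2, 3, 2, 3.
               */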
12333         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12334                 if (j == num)
12335                         j = 0;
12336                 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
12337                         hw->func_caps.rss_table_entry_width) - 1));
12338                 if ((i & 3) == 3)
12339                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12340         }
12341
12342         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
12343                 i40e_pf_disable_rss(pf);
12344                 return 0;
12345         }
12346         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
12347                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12348                 /* Random default keys */
12349                 static uint32_t rss_key_default[] = {0x6b793944,
12350                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12351                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12352                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12353
12354                 rss_conf.rss_key = (uint8_t *)rss_key_default;
12355                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12356                                                         sizeof(uint32_t);
12357         }
12358
12359         i40e_hw_rss_hash_set(pf, &rss_conf);
12360
12361         if (i40e_rss_conf_init(rss_info, &conf->conf))
12362                 return -EINVAL;
12363
12364         return 0;
12365 }
12366
12367 RTE_INIT(i40e_init_log);
12368 static void
12369 i40e_init_log(void)
12370 {
12371         i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
12372         if (i40e_logtype_init >= 0)
12373                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
12374         i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
12375         if (i40e_logtype_driver >= 0)
12376                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
12377 }
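      /*
       * Both log types default to NOTICE; more verbose PMD output can be
       * requested at runtime through the EAL --log-level option (exact
       * syntax varies between DPDK releases).
       */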
12378
12379 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12380                               QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12381                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
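
      /*
       * Example devargs (assuming the conventional spellings behind these
       * macros, "queue-num-per-vf" and "support-multi-driver"):
       *
       *     -w 0000:02:00.0,queue-num-per-vf=4,support-multi-driver=1
       */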