ethdev: flatten RSS configuration in flow API
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_eth_ctrl.h>
28 #include <rte_tailq.h>
29 #include <rte_hash_crc.h>
30
31 #include "i40e_logs.h"
32 #include "base/i40e_prototype.h"
33 #include "base/i40e_adminq_cmd.h"
34 #include "base/i40e_type.h"
35 #include "base/i40e_register.h"
36 #include "base/i40e_dcb.h"
37 #include "i40e_ethdev.h"
38 #include "i40e_rxtx.h"
39 #include "i40e_pf.h"
40 #include "i40e_regs.h"
41 #include "rte_pmd_i40e.h"
42
43 #define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
44 #define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
45
46 #define I40E_CLEAR_PXE_WAIT_MS     200
47
48 /* Maximum number of capability elements */
49 #define I40E_MAX_CAP_ELE_NUM       128
50
51 /* Wait count and interval */
52 #define I40E_CHK_Q_ENA_COUNT       1000
53 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
54
55 /* Maximum number of VSIs */
56 #define I40E_MAX_NUM_VSIS          (384UL)
57
58 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
59
60 /* Flow control default timer */
61 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
62
63 /* Flow control enable fwd bit */
64 #define I40E_PRTMAC_FWD_CTRL   0x00000001
65
66 /* Receive Packet Buffer size */
67 #define I40E_RXPBSIZE (968 * 1024)
68
69 /* Kilobytes shift */
70 #define I40E_KILOSHIFT 10
71
72 /* Flow control default high water */
73 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
74
75 /* Flow control default low water */
76 #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
77
78 /* Receive average packet size in bytes */
79 #define I40E_PACKET_AVERAGE_SIZE 128
80
81 /* Mask of PF interrupt causes */
82 #define I40E_PFINT_ICR0_ENA_MASK ( \
83                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
84                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
85                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
86                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
87                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
88                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
89                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
90                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
91                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
92
93 #define I40E_FLOW_TYPES ( \
94         (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
95         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
96         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
97         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
98         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
99         (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
100         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
101         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
102         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
103         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
104         (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
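
/*
 * Illustrative sketch only (not upstream code): the mask above is meant
 * to be probed bit-by-bit. i40e_flow_type_is_supported() is a
 * hypothetical helper name.
 */
static inline int
i40e_flow_type_is_supported(uint16_t flow_type)
{
	return flow_type < RTE_ETH_FLOW_MAX &&
	       (I40E_FLOW_TYPES & (1UL << flow_type)) != 0;
}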
105
106 /* Additional timesync values. */
107 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
108 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
109 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
110 #define I40E_PRTTSYN_TSYNENA     0x80000000
111 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
112 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
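
/*
 * A minimal sketch (hypothetical helper; ETH_SPEED_NUM_* come from
 * rte_ethdev.h) of how the per-cycle increment is chosen from the
 * negotiated link speed when timesync is enabled:
 */
static inline uint64_t
i40e_ptp_incval_for_speed(uint32_t link_speed)
{
	switch (link_speed) {
	case ETH_SPEED_NUM_40G:
		return I40E_PTP_40GB_INCVAL;
	case ETH_SPEED_NUM_10G:
		return I40E_PTP_10GB_INCVAL;
	case ETH_SPEED_NUM_1G:
		return I40E_PTP_1GB_INCVAL;
	default:
		return 0; /* leave the timer unclocked for other speeds */
	}
}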
113
114 /**
115  * Below are values for writing to un-exposed registers, as
116  * suggested by silicon experts.
117  */
118 /* Destination MAC address */
119 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
120 /* Source MAC address */
121 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
122 /* Outer (S-Tag) VLAN tag in the outer L2 header */
123 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
124 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
125 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
126 /* Single VLAN tag in the inner L2 header */
127 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
128 /* Source IPv4 address */
129 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
130 /* Destination IPv4 address */
131 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
132 /* Source IPv4 address for X722 */
133 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
134 /* Destination IPv4 address for X722 */
135 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
136 /* IPv4 Protocol for X722 */
137 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
138 /* IPv4 Time to Live for X722 */
139 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
140 /* IPv4 Type of Service (TOS) */
141 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
142 /* IPv4 Protocol */
143 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
144 /* IPv4 Time to Live */
145 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
146 /* Source IPv6 address */
147 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
148 /* Destination IPv6 address */
149 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
150 /* IPv6 Traffic Class (TC) */
151 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
152 /* IPv6 Next Header */
153 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
154 /* IPv6 Hop Limit */
155 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
156 /* Source L4 port */
157 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
158 /* Destination L4 port */
159 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
160 /* SCTP verification tag */
161 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
162 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
163 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
164 /* Source port of tunneling UDP */
165 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
166 /* Destination port of tunneling UDP */
167 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
168 /* UDP Tunneling ID, NVGRE/GRE key */
169 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
170 /* Last ether type */
171 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
172 /* Tunneling outer destination IPv4 address */
173 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
174 /* Tunneling outer destination IPv6 address */
175 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
176 /* 1st word of flex payload */
177 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
178 /* 2nd word of flex payload */
179 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
180 /* 3rd word of flex payload */
181 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
182 /* 4th word of flex payload */
183 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
184 /* 5th word of flex payload */
185 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
186 /* 6th word of flex payload */
187 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
188 /* 7th word of flex payload */
189 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
190 /* 8th word of flex payload */
191 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
192 /* all 8 words flex payload */
193 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
194 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
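
/*
 * These bits are OR-ed together into a single 64-bit input-set word
 * before being written to the hardware. For example, a classic
 * IPv4/UDP 5-tuple selection could look like this (illustrative
 * macro only, not used by the driver):
 */
#define I40E_EXAMPLE_INSET_IPV4_UDP_5TUPLE \
	(I40E_REG_INSET_L3_SRC_IP4 | I40E_REG_INSET_L3_DST_IP4 | \
	 I40E_REG_INSET_L3_IP4_PROTO | \
	 I40E_REG_INSET_L4_SRC_PORT | I40E_REG_INSET_L4_DST_PORT)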
195
196 #define I40E_TRANSLATE_INSET 0
197 #define I40E_TRANSLATE_REG   1
198
199 #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
200 #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
201 #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
202 #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
203 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
204 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
205
206 /* PCI offset for querying capability */
207 #define PCI_DEV_CAP_REG            0xA4
208 /* PCI offset for enabling/disabling Extended Tag */
209 #define PCI_DEV_CTRL_REG           0xA8
210 /* Bit mask of Extended Tag capability */
211 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
212 /* Bit shift of Extended Tag enable/disable */
213 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
214 /* Bit mask of Extended Tag enable/disable */
215 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
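
/*
 * Sketch (hypothetical helper) of how the capability bit above can be
 * probed through the rte_bus_pci config-space accessor:
 */
static inline int
i40e_pci_ext_tag_supported(struct rte_pci_device *pci_dev)
{
	uint32_t cap;

	if (rte_pci_read_config(pci_dev, &cap, sizeof(cap),
				PCI_DEV_CAP_REG) < 0)
		return -1; /* config-space read failed */
	return !!(cap & PCI_DEV_CAP_EXT_TAG_MASK);
}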
216
217 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
218 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
219 static int i40e_dev_configure(struct rte_eth_dev *dev);
220 static int i40e_dev_start(struct rte_eth_dev *dev);
221 static void i40e_dev_stop(struct rte_eth_dev *dev);
222 static void i40e_dev_close(struct rte_eth_dev *dev);
223 static int  i40e_dev_reset(struct rte_eth_dev *dev);
224 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
225 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
226 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
227 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
228 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
229 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
230 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
231                                struct rte_eth_stats *stats);
232 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
233                                struct rte_eth_xstat *xstats, unsigned n);
234 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
235                                      struct rte_eth_xstat_name *xstats_names,
236                                      unsigned limit);
237 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
238 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
239                                             uint16_t queue_id,
240                                             uint8_t stat_idx,
241                                             uint8_t is_rx);
242 static int i40e_fw_version_get(struct rte_eth_dev *dev,
243                                 char *fw_version, size_t fw_size);
244 static void i40e_dev_info_get(struct rte_eth_dev *dev,
245                               struct rte_eth_dev_info *dev_info);
246 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
247                                 uint16_t vlan_id,
248                                 int on);
249 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
250                               enum rte_vlan_type vlan_type,
251                               uint16_t tpid);
252 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
253 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
254                                       uint16_t queue,
255                                       int on);
256 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
257 static int i40e_dev_led_on(struct rte_eth_dev *dev);
258 static int i40e_dev_led_off(struct rte_eth_dev *dev);
259 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
260                               struct rte_eth_fc_conf *fc_conf);
261 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
262                               struct rte_eth_fc_conf *fc_conf);
263 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
264                                        struct rte_eth_pfc_conf *pfc_conf);
265 static int i40e_macaddr_add(struct rte_eth_dev *dev,
266                             struct ether_addr *mac_addr,
267                             uint32_t index,
268                             uint32_t pool);
269 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
270 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
271                                     struct rte_eth_rss_reta_entry64 *reta_conf,
272                                     uint16_t reta_size);
273 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
274                                    struct rte_eth_rss_reta_entry64 *reta_conf,
275                                    uint16_t reta_size);
276
277 static int i40e_get_cap(struct i40e_hw *hw);
278 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
279 static int i40e_pf_setup(struct i40e_pf *pf);
280 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
281 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
282 static int i40e_dcb_setup(struct rte_eth_dev *dev);
283 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
284                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
285 static void i40e_stat_update_48(struct i40e_hw *hw,
286                                uint32_t hireg,
287                                uint32_t loreg,
288                                bool offset_loaded,
289                                uint64_t *offset,
290                                uint64_t *stat);
291 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
292 static void i40e_dev_interrupt_handler(void *param);
293 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
294                                 uint32_t base, uint32_t num);
295 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
296 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
297                         uint32_t base);
298 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
299                         uint16_t num);
300 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
301 static int i40e_veb_release(struct i40e_veb *veb);
302 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
303                                                 struct i40e_vsi *vsi);
304 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
305 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
306 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
307                                              struct i40e_macvlan_filter *mv_f,
308                                              int num,
309                                              uint16_t vlan);
310 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
311 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
312                                     struct rte_eth_rss_conf *rss_conf);
313 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
314                                       struct rte_eth_rss_conf *rss_conf);
315 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
316                                         struct rte_eth_udp_tunnel *udp_tunnel);
317 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
318                                         struct rte_eth_udp_tunnel *udp_tunnel);
319 static void i40e_filter_input_set_init(struct i40e_pf *pf);
320 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
321                                 enum rte_filter_op filter_op,
322                                 void *arg);
323 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
324                                 enum rte_filter_type filter_type,
325                                 enum rte_filter_op filter_op,
326                                 void *arg);
327 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
328                                   struct rte_eth_dcb_info *dcb_info);
329 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
330 static void i40e_configure_registers(struct i40e_hw *hw);
331 static void i40e_hw_init(struct rte_eth_dev *dev);
332 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
333 static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
334                                                      uint16_t seid,
335                                                      uint16_t rule_type,
336                                                      uint16_t *entries,
337                                                      uint16_t count,
338                                                      uint16_t rule_id);
339 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
340                         struct rte_eth_mirror_conf *mirror_conf,
341                         uint8_t sw_id, uint8_t on);
342 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
343
344 static int i40e_timesync_enable(struct rte_eth_dev *dev);
345 static int i40e_timesync_disable(struct rte_eth_dev *dev);
346 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
347                                            struct timespec *timestamp,
348                                            uint32_t flags);
349 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
350                                            struct timespec *timestamp);
351 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
352
353 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
354
355 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
356                                    struct timespec *timestamp);
357 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
358                                     const struct timespec *timestamp);
359
360 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
361                                          uint16_t queue_id);
362 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
363                                           uint16_t queue_id);
364
365 static int i40e_get_regs(struct rte_eth_dev *dev,
366                          struct rte_dev_reg_info *regs);
367
368 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
369
370 static int i40e_get_eeprom(struct rte_eth_dev *dev,
371                            struct rte_dev_eeprom_info *eeprom);
372
373 static int i40e_get_module_info(struct rte_eth_dev *dev,
374                                 struct rte_eth_dev_module_info *modinfo);
375 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
376                                   struct rte_dev_eeprom_info *info);
377
378 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
379                                       struct ether_addr *mac_addr);
380
381 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
382
383 static int i40e_ethertype_filter_convert(
384         const struct rte_eth_ethertype_filter *input,
385         struct i40e_ethertype_filter *filter);
386 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
387                                    struct i40e_ethertype_filter *filter);
388
389 static int i40e_tunnel_filter_convert(
390         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
391         struct i40e_tunnel_filter *tunnel_filter);
392 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
393                                 struct i40e_tunnel_filter *tunnel_filter);
394 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
395
396 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
397 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
398 static void i40e_filter_restore(struct i40e_pf *pf);
399 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
400
401 int i40e_logtype_init;
402 int i40e_logtype_driver;
403
404 static const struct rte_pci_id pci_id_i40e_map[] = {
405         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
406         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
407         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
408         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
409         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
410         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
411         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
412         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
413         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
414         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
415         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
416         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
417         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
418         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
419         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
420         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
421         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
422         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
423         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
424         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
425         { .vendor_id = 0, /* sentinel */ },
426 };
427
428 static const struct eth_dev_ops i40e_eth_dev_ops = {
429         .dev_configure                = i40e_dev_configure,
430         .dev_start                    = i40e_dev_start,
431         .dev_stop                     = i40e_dev_stop,
432         .dev_close                    = i40e_dev_close,
433         .dev_reset                    = i40e_dev_reset,
434         .promiscuous_enable           = i40e_dev_promiscuous_enable,
435         .promiscuous_disable          = i40e_dev_promiscuous_disable,
436         .allmulticast_enable          = i40e_dev_allmulticast_enable,
437         .allmulticast_disable         = i40e_dev_allmulticast_disable,
438         .dev_set_link_up              = i40e_dev_set_link_up,
439         .dev_set_link_down            = i40e_dev_set_link_down,
440         .link_update                  = i40e_dev_link_update,
441         .stats_get                    = i40e_dev_stats_get,
442         .xstats_get                   = i40e_dev_xstats_get,
443         .xstats_get_names             = i40e_dev_xstats_get_names,
444         .stats_reset                  = i40e_dev_stats_reset,
445         .xstats_reset                 = i40e_dev_stats_reset,
446         .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
447         .fw_version_get               = i40e_fw_version_get,
448         .dev_infos_get                = i40e_dev_info_get,
449         .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
450         .vlan_filter_set              = i40e_vlan_filter_set,
451         .vlan_tpid_set                = i40e_vlan_tpid_set,
452         .vlan_offload_set             = i40e_vlan_offload_set,
453         .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
454         .vlan_pvid_set                = i40e_vlan_pvid_set,
455         .rx_queue_start               = i40e_dev_rx_queue_start,
456         .rx_queue_stop                = i40e_dev_rx_queue_stop,
457         .tx_queue_start               = i40e_dev_tx_queue_start,
458         .tx_queue_stop                = i40e_dev_tx_queue_stop,
459         .rx_queue_setup               = i40e_dev_rx_queue_setup,
460         .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
461         .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
462         .rx_queue_release             = i40e_dev_rx_queue_release,
463         .rx_queue_count               = i40e_dev_rx_queue_count,
464         .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
465         .rx_descriptor_status         = i40e_dev_rx_descriptor_status,
466         .tx_descriptor_status         = i40e_dev_tx_descriptor_status,
467         .tx_queue_setup               = i40e_dev_tx_queue_setup,
468         .tx_queue_release             = i40e_dev_tx_queue_release,
469         .dev_led_on                   = i40e_dev_led_on,
470         .dev_led_off                  = i40e_dev_led_off,
471         .flow_ctrl_get                = i40e_flow_ctrl_get,
472         .flow_ctrl_set                = i40e_flow_ctrl_set,
473         .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
474         .mac_addr_add                 = i40e_macaddr_add,
475         .mac_addr_remove              = i40e_macaddr_remove,
476         .reta_update                  = i40e_dev_rss_reta_update,
477         .reta_query                   = i40e_dev_rss_reta_query,
478         .rss_hash_update              = i40e_dev_rss_hash_update,
479         .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
480         .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
481         .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
482         .filter_ctrl                  = i40e_dev_filter_ctrl,
483         .rxq_info_get                 = i40e_rxq_info_get,
484         .txq_info_get                 = i40e_txq_info_get,
485         .mirror_rule_set              = i40e_mirror_rule_set,
486         .mirror_rule_reset            = i40e_mirror_rule_reset,
487         .timesync_enable              = i40e_timesync_enable,
488         .timesync_disable             = i40e_timesync_disable,
489         .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
490         .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
491         .get_dcb_info                 = i40e_dev_get_dcb_info,
492         .timesync_adjust_time         = i40e_timesync_adjust_time,
493         .timesync_read_time           = i40e_timesync_read_time,
494         .timesync_write_time          = i40e_timesync_write_time,
495         .get_reg                      = i40e_get_regs,
496         .get_eeprom_length            = i40e_get_eeprom_length,
497         .get_eeprom                   = i40e_get_eeprom,
498         .get_module_info              = i40e_get_module_info,
499         .get_module_eeprom            = i40e_get_module_eeprom,
500         .mac_addr_set                 = i40e_set_default_mac_addr,
501         .mtu_set                      = i40e_dev_mtu_set,
502         .tm_ops_get                   = i40e_tm_ops_get,
503 };
504
505 /* Store statistics names and their offsets in the stats structure */
506 struct rte_i40e_xstats_name_off {
507         char name[RTE_ETH_XSTATS_NAME_SIZE];
508         unsigned offset;
509 };
510
511 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
512         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
513         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
514         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
515         {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
516         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
517                 rx_unknown_protocol)},
518         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
519         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
520         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
521         {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
522 };
523
524 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
525                 sizeof(rte_i40e_stats_strings[0]))
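
/*
 * Sketch of how a name/offset table such as the one above is consumed:
 * the xstats retrieval path walks the table and reads each uint64_t
 * counter at its recorded byte offset, roughly as below (hypothetical
 * helper name):
 */
static inline uint64_t
i40e_stat_at_offset(const struct i40e_eth_stats *stats, unsigned int off)
{
	return *(const uint64_t *)(((const char *)stats) + off);
}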
526
527 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
528         {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
529                 tx_dropped_link_down)},
530         {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
531         {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
532                 illegal_bytes)},
533         {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
534         {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
535                 mac_local_faults)},
536         {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
537                 mac_remote_faults)},
538         {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
539                 rx_length_errors)},
540         {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
541         {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
542         {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
543         {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
544         {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
545         {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
546                 rx_size_127)},
547         {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
548                 rx_size_255)},
549         {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
550                 rx_size_511)},
551         {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
552                 rx_size_1023)},
553         {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
554                 rx_size_1522)},
555         {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
556                 rx_size_big)},
557         {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
558                 rx_undersize)},
559         {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
560                 rx_oversize)},
561         {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
562                 mac_short_packet_dropped)},
563         {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
564                 rx_fragments)},
565         {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
566         {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
567         {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
568                 tx_size_127)},
569         {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
570                 tx_size_255)},
571         {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
572                 tx_size_511)},
573         {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
574                 tx_size_1023)},
575         {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
576                 tx_size_1522)},
577         {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
578                 tx_size_big)},
579         {"rx_flow_director_atr_match_packets",
580                 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
581         {"rx_flow_director_sb_match_packets",
582                 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
583         {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
584                 tx_lpi_status)},
585         {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
586                 rx_lpi_status)},
587         {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
588                 tx_lpi_count)},
589         {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
590                 rx_lpi_count)},
591 };
592
593 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
594                 sizeof(rte_i40e_hw_port_strings[0]))
595
596 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
597         {"xon_packets", offsetof(struct i40e_hw_port_stats,
598                 priority_xon_rx)},
599         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
600                 priority_xoff_rx)},
601 };
602
603 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
604                 sizeof(rte_i40e_rxq_prio_strings[0]))
605
606 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
607         {"xon_packets", offsetof(struct i40e_hw_port_stats,
608                 priority_xon_tx)},
609         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
610                 priority_xoff_tx)},
611         {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
612                 priority_xon_2_xoff)},
613 };
614
615 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
616                 sizeof(rte_i40e_txq_prio_strings[0]))
617
618 static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
619         struct rte_pci_device *pci_dev)
620 {
621         return rte_eth_dev_pci_generic_probe(pci_dev,
622                 sizeof(struct i40e_adapter), eth_i40e_dev_init);
623 }
624
625 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
626 {
627         return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
628 }
629
630 static struct rte_pci_driver rte_i40e_pmd = {
631         .id_table = pci_id_i40e_map,
632         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
633                      RTE_PCI_DRV_IOVA_AS_VA,
634         .probe = eth_i40e_pci_probe,
635         .remove = eth_i40e_pci_remove,
636 };
637
638 static inline void
639 i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
640 {
641         i40e_write_rx_ctl(hw, reg_addr, reg_val);
642         PMD_DRV_LOG(DEBUG,
643                     "Global register 0x%08x is modified with value 0x%08x",
644                     reg_addr, reg_val);
645 }
646
647 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
648 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
649 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
650
651 #ifndef I40E_GLQF_ORT
652 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
653 #endif
654 #ifndef I40E_GLQF_PIT
655 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
656 #endif
657 #ifndef I40E_GLQF_L3_MAP
658 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
659 #endif
660
661 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
662 {
663         /*
664          * Initialize registers for parsing the packet type of QinQ.
665          * This should be removed once a proper configuration API is
666          * added, to avoid configuration conflicts between ports of
667          * the same device.
668          */
669         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
670         I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
671         i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
672 }
673
674 static inline void i40e_config_automask(struct i40e_pf *pf)
675 {
676         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
677         uint32_t val;
678
679         /* Keep the INTENA flag from being auto-cleared on interrupt */
680         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
681         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
682                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
683
684         /* If multi-driver support is enabled, the PF will use INT0. */
685         if (!pf->support_multi_driver)
686                 val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
687
688         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
689 }
690
691 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
692
693 /*
694  * Add an ethertype filter to drop all flow control frames transmitted
695  * from VSIs.
696  */
697 static void
698 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
699 {
700         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
701         uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
702                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
703                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
704         int ret;
705
706         ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
707                                 I40E_FLOW_CONTROL_ETHERTYPE, flags,
708                                 pf->main_vsi_seid, 0,
709                                 TRUE, NULL, NULL);
710         if (ret)
711                 PMD_INIT_LOG(ERR,
712                         "Failed to add filter to drop flow control frames from VSIs.");
713 }
714
715 static int
716 floating_veb_list_handler(__rte_unused const char *key,
717                           const char *floating_veb_value,
718                           void *opaque)
719 {
720         int idx = 0;
721         unsigned int count = 0;
722         char *end = NULL;
723         int min, max;
724         bool *vf_floating_veb = opaque;
725
726         while (isblank(*floating_veb_value))
727                 floating_veb_value++;
728
729         /* Reset floating VEB configuration for VFs */
730         for (idx = 0; idx < I40E_MAX_VF; idx++)
731                 vf_floating_veb[idx] = false;
732
733         min = I40E_MAX_VF;
734         do {
735                 while (isblank(*floating_veb_value))
736                         floating_veb_value++;
737                 if (*floating_veb_value == '\0')
738                         return -1;
739                 errno = 0;
740                 idx = strtoul(floating_veb_value, &end, 10);
741                 if (errno || end == NULL)
742                         return -1;
743                 while (isblank(*end))
744                         end++;
745                 if (*end == '-') {
746                         min = idx;
747                 } else if ((*end == ';') || (*end == '\0')) {
748                         max = idx;
749                         if (min == I40E_MAX_VF)
750                                 min = idx;
751                         if (max >= I40E_MAX_VF)
752                                 max = I40E_MAX_VF - 1;
753                         for (idx = min; idx <= max; idx++) {
754                                 vf_floating_veb[idx] = true;
755                                 count++;
756                         }
757                         min = I40E_MAX_VF;
758                 } else {
759                         return -1;
760                 }
761                 floating_veb_value = end + 1;
762         } while (*end != '\0');
763
764         if (count == 0)
765                 return -1;
766
767         return 0;
768 }
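
/*
 * Accepted syntax for the handler above: semicolon-separated VF indices
 * and ranges. For example (hypothetical PCI address):
 *   -w 0000:01:00.0,enable_floating_veb=1,floating_veb_list=0;3-5;7
 * marks VFs 0, 3, 4, 5 and 7 for attachment to the floating VEB.
 */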
769
770 static void
771 config_vf_floating_veb(struct rte_devargs *devargs,
772                        uint16_t floating_veb,
773                        bool *vf_floating_veb)
774 {
775         struct rte_kvargs *kvlist;
776         int i;
777         const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
778
779         if (!floating_veb)
780                 return;
781         /* All the VFs attach to the floating VEB by default
782          * when the floating VEB is enabled.
783          */
784         for (i = 0; i < I40E_MAX_VF; i++)
785                 vf_floating_veb[i] = true;
786
787         if (devargs == NULL)
788                 return;
789
790         kvlist = rte_kvargs_parse(devargs->args, NULL);
791         if (kvlist == NULL)
792                 return;
793
794         if (!rte_kvargs_count(kvlist, floating_veb_list)) {
795                 rte_kvargs_free(kvlist);
796                 return;
797         }
798         /* When the floating_veb_list parameter exists, all the VFs
799          * first attach to the legacy VEB, and are then moved to the
800          * floating VEB according to the floating_veb_list.
801          */
802         if (rte_kvargs_process(kvlist, floating_veb_list,
803                                floating_veb_list_handler,
804                                vf_floating_veb) < 0) {
805                 rte_kvargs_free(kvlist);
806                 return;
807         }
808         rte_kvargs_free(kvlist);
809 }
810
811 static int
812 i40e_check_floating_handler(__rte_unused const char *key,
813                             const char *value,
814                             __rte_unused void *opaque)
815 {
816         if (strcmp(value, "1"))
817                 return -1;
818
819         return 0;
820 }
821
822 static int
823 is_floating_veb_supported(struct rte_devargs *devargs)
824 {
825         struct rte_kvargs *kvlist;
826         const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
827
828         if (devargs == NULL)
829                 return 0;
830
831         kvlist = rte_kvargs_parse(devargs->args, NULL);
832         if (kvlist == NULL)
833                 return 0;
834
835         if (!rte_kvargs_count(kvlist, floating_veb_key)) {
836                 rte_kvargs_free(kvlist);
837                 return 0;
838         }
839         /* Floating VEB is enabled when the key-value pair
840          * enable_floating_veb=1 is present.
841          */
842         if (rte_kvargs_process(kvlist, floating_veb_key,
843                                i40e_check_floating_handler, NULL) < 0) {
844                 rte_kvargs_free(kvlist);
845                 return 0;
846         }
847         rte_kvargs_free(kvlist);
848
849         return 1;
850 }
851
852 static void
853 config_floating_veb(struct rte_eth_dev *dev)
854 {
855         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
856         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
857         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
858
859         memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
860
861         if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
862                 pf->floating_veb =
863                         is_floating_veb_supported(pci_dev->device.devargs);
864                 config_vf_floating_veb(pci_dev->device.devargs,
865                                        pf->floating_veb,
866                                        pf->floating_veb_list);
867         } else {
868                 pf->floating_veb = false;
869         }
870 }
871
872 #define I40E_L2_TAGS_S_TAG_SHIFT 1
873 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
874
875 static int
876 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
877 {
878         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
879         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
880         char ethertype_hash_name[RTE_HASH_NAMESIZE];
881         int ret;
882
883         struct rte_hash_parameters ethertype_hash_params = {
884                 .name = ethertype_hash_name,
885                 .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
886                 .key_len = sizeof(struct i40e_ethertype_filter_input),
887                 .hash_func = rte_hash_crc,
888                 .hash_func_init_val = 0,
889                 .socket_id = rte_socket_id(),
890         };
891
892         /* Initialize ethertype filter rule list and hash */
893         TAILQ_INIT(&ethertype_rule->ethertype_list);
894         snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
895                  "ethertype_%s", dev->device->name);
896         ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
897         if (!ethertype_rule->hash_table) {
898                 PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
899                 return -EINVAL;
900         }
901         ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
902                                        sizeof(struct i40e_ethertype_filter *) *
903                                        I40E_MAX_ETHERTYPE_FILTER_NUM,
904                                        0);
905         if (!ethertype_rule->hash_map) {
906                 PMD_INIT_LOG(ERR,
907                              "Failed to allocate memory for ethertype hash map!");
908                 ret = -ENOMEM;
909                 goto err_ethertype_hash_map_alloc;
910         }
911
912         return 0;
913
914 err_ethertype_hash_map_alloc:
915         rte_hash_free(ethertype_rule->hash_table);
916
917         return ret;
918 }
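
/*
 * A minimal sketch (hypothetical helper) of the insert pattern the hash
 * table and map above serve; the driver's actual logic follows
 * i40e_sw_ethertype_filter_insert():
 */
static inline int
i40e_hash_map_insert(struct rte_hash *h, void **map,
		     const void *key, void *filter)
{
	int pos = rte_hash_add_key(h, key);

	if (pos < 0)
		return pos; /* table full or invalid parameters */
	map[pos] = filter; /* mirror the entry for O(1) retrieval */
	return 0;
}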
919
920 static int
921 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
922 {
923         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
924         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
925         char tunnel_hash_name[RTE_HASH_NAMESIZE];
926         int ret;
927
928         struct rte_hash_parameters tunnel_hash_params = {
929                 .name = tunnel_hash_name,
930                 .entries = I40E_MAX_TUNNEL_FILTER_NUM,
931                 .key_len = sizeof(struct i40e_tunnel_filter_input),
932                 .hash_func = rte_hash_crc,
933                 .hash_func_init_val = 0,
934                 .socket_id = rte_socket_id(),
935         };
936
937         /* Initialize tunnel filter rule list and hash */
938         TAILQ_INIT(&tunnel_rule->tunnel_list);
939         snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
940                  "tunnel_%s", dev->device->name);
941         tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
942         if (!tunnel_rule->hash_table) {
943                 PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
944                 return -EINVAL;
945         }
946         tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
947                                     sizeof(struct i40e_tunnel_filter *) *
948                                     I40E_MAX_TUNNEL_FILTER_NUM,
949                                     0);
950         if (!tunnel_rule->hash_map) {
951                 PMD_INIT_LOG(ERR,
952                              "Failed to allocate memory for tunnel hash map!");
953                 ret = -ENOMEM;
954                 goto err_tunnel_hash_map_alloc;
955         }
956
957         return 0;
958
959 err_tunnel_hash_map_alloc:
960         rte_hash_free(tunnel_rule->hash_table);
961
962         return ret;
963 }
964
965 static int
966 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
967 {
968         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
969         struct i40e_fdir_info *fdir_info = &pf->fdir;
970         char fdir_hash_name[RTE_HASH_NAMESIZE];
971         int ret;
972
973         struct rte_hash_parameters fdir_hash_params = {
974                 .name = fdir_hash_name,
975                 .entries = I40E_MAX_FDIR_FILTER_NUM,
976                 .key_len = sizeof(struct i40e_fdir_input),
977                 .hash_func = rte_hash_crc,
978                 .hash_func_init_val = 0,
979                 .socket_id = rte_socket_id(),
980         };
981
982         /* Initialize flow director filter rule list and hash */
983         TAILQ_INIT(&fdir_info->fdir_list);
984         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
985                  "fdir_%s", dev->device->name);
986         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
987         if (!fdir_info->hash_table) {
988                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
989                 return -EINVAL;
990         }
991         fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
992                                           sizeof(struct i40e_fdir_filter *) *
993                                           I40E_MAX_FDIR_FILTER_NUM,
994                                           0);
995         if (!fdir_info->hash_map) {
996                 PMD_INIT_LOG(ERR,
997                              "Failed to allocate memory for fdir hash map!");
998                 ret = -ENOMEM;
999                 goto err_fdir_hash_map_alloc;
1000         }
1001         return 0;
1002
1003 err_fdir_hash_map_alloc:
1004         rte_hash_free(fdir_info->hash_table);
1005
1006         return ret;
1007 }
1008
1009 static void
1010 i40e_init_customized_info(struct i40e_pf *pf)
1011 {
1012         int i;
1013
1014         /* Initialize customized pctype */
1015         for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1016                 pf->customized_pctype[i].index = i;
1017                 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1018                 pf->customized_pctype[i].valid = false;
1019         }
1020
1021         pf->gtp_support = false;
1022 }
1023
1024 void
1025 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1026 {
1027         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1028         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1029         struct i40e_queue_regions *info = &pf->queue_region;
1030         uint16_t i;
1031
1032         for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1033                 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1034
1035         memset(info, 0, sizeof(struct i40e_queue_regions));
1036 }
1037
1038 #define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"
1039
1040 static int
1041 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1042                                const char *value,
1043                                void *opaque)
1044 {
1045         struct i40e_pf *pf;
1046         unsigned long support_multi_driver;
1047         char *end;
1048
1049         pf = (struct i40e_pf *)opaque;
1050
1051         errno = 0;
1052         support_multi_driver = strtoul(value, &end, 10);
1053         if (errno != 0 || end == value || *end != 0) {
1054                 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1055                 return -(EINVAL);
1056         }
1057
1058         if (support_multi_driver == 1 || support_multi_driver == 0)
1059                 pf->support_multi_driver = (bool)support_multi_driver;
1060         else
1061                 PMD_DRV_LOG(WARNING,
1062                             "%s must be 1 or 0; enabling global configuration by default",
1063                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1064         return 0;
1065 }
1066
1067 static int
1068 i40e_support_multi_driver(struct rte_eth_dev *dev)
1069 {
1070         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1071         static const char *const valid_keys[] = {
1072                 ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
1073         struct rte_kvargs *kvlist;
1074
1075         /* Enable global configuration by default */
1076         pf->support_multi_driver = false;
1077
1078         if (!dev->device->devargs)
1079                 return 0;
1080
1081         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1082         if (!kvlist)
1083                 return -EINVAL;
1084
1085         if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
1086                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
1087                             "the first invalid or the last valid value is used!",
1088                             ETH_I40E_SUPPORT_MULTI_DRIVER);
1089
1090         if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1091                                i40e_parse_multi_drv_handler, pf) < 0) {
1092                 rte_kvargs_free(kvlist);
1093                 return -EINVAL;
1094         }
1095
1096         rte_kvargs_free(kvlist);
1097         return 0;
1098 }
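
/*
 * Example usage (hypothetical PCI address): start the PF with
 *   -w 0000:02:00.0,support-multi-driver=1
 * so the PMD avoids programming global registers when another driver
 * (e.g. the kernel one) shares the device.
 */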
1099
1100 static int
1101 eth_i40e_dev_init(struct rte_eth_dev *dev)
1102 {
1103         struct rte_pci_device *pci_dev;
1104         struct rte_intr_handle *intr_handle;
1105         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1106         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1107         struct i40e_vsi *vsi;
1108         int ret;
1109         uint32_t len;
1110         uint8_t aq_fail = 0;
1111
1112         PMD_INIT_FUNC_TRACE();
1113
1114         dev->dev_ops = &i40e_eth_dev_ops;
1115         dev->rx_pkt_burst = i40e_recv_pkts;
1116         dev->tx_pkt_burst = i40e_xmit_pkts;
1117         dev->tx_pkt_prepare = i40e_prep_pkts;
1118
1119         /* For secondary processes, don't initialise further; the primary
1120          * has already done this work. Only check whether a different
1121          * RX function is needed. */
1122         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1123                 i40e_set_rx_function(dev);
1124                 i40e_set_tx_function(dev);
1125                 return 0;
1126         }
1127         i40e_set_default_ptype_table(dev);
1128         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1129         intr_handle = &pci_dev->intr_handle;
1130
1131         rte_eth_copy_pci_info(dev, pci_dev);
1132
1133         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1134         pf->adapter->eth_dev = dev;
1135         pf->dev_data = dev->data;
1136
1137         hw->back = I40E_PF_TO_ADAPTER(pf);
1138         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1139         if (!hw->hw_addr) {
1140                 PMD_INIT_LOG(ERR,
1141                         "Hardware is not available, as address is NULL");
1142                 return -ENODEV;
1143         }
1144
1145         hw->vendor_id = pci_dev->id.vendor_id;
1146         hw->device_id = pci_dev->id.device_id;
1147         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1148         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1149         hw->bus.device = pci_dev->addr.devid;
1150         hw->bus.func = pci_dev->addr.function;
1151         hw->adapter_stopped = 0;
1152
1153         /* Check whether multi-driver support is needed */
1154         i40e_support_multi_driver(dev);
1155
1156         /* Make sure all is clean before doing PF reset */
1157         i40e_clear_hw(hw);
1158
1159         /* Initialize the hardware */
1160         i40e_hw_init(dev);
1161
1162         /* Reset here to make sure all is clean for each PF */
1163         ret = i40e_pf_reset(hw);
1164         if (ret) {
1165                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1166                 return ret;
1167         }
1168
1169         /* Initialize the shared code (base driver) */
1170         ret = i40e_init_shared_code(hw);
1171         if (ret) {
1172                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1173                 return ret;
1174         }
1175
1176         i40e_config_automask(pf);
1177
1178         i40e_set_default_pctype_table(dev);
1179
1180         /*
1181          * To work around an NVM issue, initialize the registers
1182          * for the QinQ packet type in software.
1183          * This should be removed once the issue is fixed in the NVM.
1184          */
1185         if (!pf->support_multi_driver)
1186                 i40e_GLQF_reg_init(hw);
1187
1188         /* Initialize the input set for filters (hash and FDIR) to default values */
1189         i40e_filter_input_set_init(pf);
1190
1191         /* Initialize the parameters for adminq */
1192         i40e_init_adminq_parameter(hw);
1193         ret = i40e_init_adminq(hw);
1194         if (ret != I40E_SUCCESS) {
1195                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1196                 return -EIO;
1197         }
1198         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1199                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1200                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1201                      ((hw->nvm.version >> 12) & 0xf),
1202                      ((hw->nvm.version >> 4) & 0xff),
1203                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1204
1205         /* Initialise the L3_MAP register */
1206         if (!pf->support_multi_driver) {
1207                 ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1208                                                    0x00000028,  NULL);
1209                 if (ret)
1210                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1211                                      ret);
1212                 PMD_INIT_LOG(DEBUG,
1213                              "Global register 0x%08x is changed with 0x28",
1214                              I40E_GLQF_L3_MAP(40));
1215                 i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
1216         }
1217
1218         /* Need the special FW version to support floating VEB */
1219         config_floating_veb(dev);
1220         /* Clear PXE mode */
1221         i40e_clear_pxe_mode(hw);
1222         i40e_dev_sync_phy_type(hw);
1223
1224         /*
1225          * On X710, performance falls far short of expectations on recent
1226          * firmware versions, and the fix for this issue may not be
1227          * integrated in the next firmware release, so a workaround in the
1228          * software driver is needed. It modifies the initial values of 3
1229          * internal-only registers. Note that the workaround can be removed
1230          * once this is fixed in firmware in the future.
1231          */
1232         i40e_configure_registers(hw);
1233
1234         /* Get hw capabilities */
1235         ret = i40e_get_cap(hw);
1236         if (ret != I40E_SUCCESS) {
1237                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1238                 goto err_get_capabilities;
1239         }
1240
1241         /* Initialize parameters for PF */
1242         ret = i40e_pf_parameter_init(dev);
1243         if (ret != 0) {
1244                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1245                 goto err_parameter_init;
1246         }
1247
1248         /* Initialize the queue management */
1249         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1250         if (ret < 0) {
1251                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1252                 goto err_qp_pool_init;
1253         }
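              /* MSI-X vector 0 is reserved for the misc/adminq interrupt
               * (IRQ0), so the vector pool starts at 1 and holds
               * num_msix_vectors - 1 entries.
               */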
1254         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1255                                 hw->func_caps.num_msix_vectors - 1);
1256         if (ret < 0) {
1257                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1258                 goto err_msix_pool_init;
1259         }
1260
1261         /* Initialize lan hmc */
1262         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1263                                 hw->func_caps.num_rx_qp, 0, 0);
1264         if (ret != I40E_SUCCESS) {
1265                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1266                 goto err_init_lan_hmc;
1267         }
1268
1269         /* Configure lan hmc */
1270         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1271         if (ret != I40E_SUCCESS) {
1272                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1273                 goto err_configure_lan_hmc;
1274         }
1275
1276         /* Get and check the mac address */
1277         i40e_get_mac_addr(hw, hw->mac.addr);
1278         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1279                 PMD_INIT_LOG(ERR, "mac address is not valid");
1280                 ret = -EIO;
1281                 goto err_get_mac_addr;
1282         }
1283         /* Copy the permanent MAC address */
1284         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1285                         (struct ether_addr *) hw->mac.perm_addr);
1286
1287         /* Disable flow control */
1288         hw->fc.requested_mode = I40E_FC_NONE;
1289         i40e_set_fc(hw, &aq_fail, TRUE);
1290
1291         /* Set the global registers with default ether type value */
1292         if (!pf->support_multi_driver) {
1293                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1294                                          ETHER_TYPE_VLAN);
1295                 if (ret != I40E_SUCCESS) {
1296                         PMD_INIT_LOG(ERR,
1297                                      "Failed to set the default outer "
1298                                      "VLAN ether type");
1299                         goto err_setup_pf_switch;
1300                 }
1301         }
1302
1303         /* PF setup, which includes VSI setup */
1304         ret = i40e_pf_setup(pf);
1305         if (ret) {
1306                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1307                 goto err_setup_pf_switch;
1308         }
1309
1310         /* reset all stats of the device, including pf and main vsi */
1311         i40e_dev_stats_reset(dev);
1312
1313         vsi = pf->main_vsi;
1314
1315         /* Disable double vlan by default */
1316         i40e_vsi_config_double_vlan(vsi, FALSE);
1317
1318         /* Disable S-TAG identification when floating_veb is disabled */
1319         if (!pf->floating_veb) {
1320                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1321                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1322                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1323                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1324                 }
1325         }
1326
1327         if (!vsi->max_macaddrs)
1328                 len = ETHER_ADDR_LEN;
1329         else
1330                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1331
1332         /* Should be after VSI initialized */
1333         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1334         if (!dev->data->mac_addrs) {
1335                 PMD_INIT_LOG(ERR,
1336                         "Failed to allocate memory for storing mac address");
                        ret = -ENOMEM;
1337                 goto err_mac_alloc;
1338         }
1339         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1340                                         &dev->data->mac_addrs[0]);
1341
1342         /* Init dcb to sw mode by default */
1343         ret = i40e_dcb_init_configure(dev, TRUE);
1344         if (ret != I40E_SUCCESS) {
1345                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1346                 pf->flags &= ~I40E_FLAG_DCB;
1347         }
1348         /* Update HW struct after DCB configuration */
1349         i40e_get_cap(hw);
1350
1351         /* initialize pf host driver to set up SR-IOV resources if applicable */
1352         i40e_pf_host_init(dev);
1353
1354         /* register callback func to eal lib */
1355         rte_intr_callback_register(intr_handle,
1356                                    i40e_dev_interrupt_handler, dev);
1357
1358         /* configure and enable device interrupt */
1359         i40e_pf_config_irq0(hw, TRUE);
1360         i40e_pf_enable_irq0(hw);
1361
1362         /* enable uio intr after callback registration */
1363         rte_intr_enable(intr_handle);
1364
1365         /* By default disable flexible payload in global configuration */
1366         if (!pf->support_multi_driver)
1367                 i40e_flex_payload_reg_set_default(hw);
1368
1369         /*
1370          * Add an ethertype filter to drop all flow control frames transmitted
1371          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1372          * frames to wire.
1373          */
1374         i40e_add_tx_flow_control_drop_filter(pf);
1375
1376         /* Set the max frame size to 0x2600 by default,
1377          * in case other drivers changed the default value.
1378          */
1379         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1380
1381         /* initialize mirror rule list */
1382         TAILQ_INIT(&pf->mirror_list);
1383
1384         /* initialize Traffic Manager configuration */
1385         i40e_tm_conf_init(dev);
1386
1387         /* Initialize customized information */
1388         i40e_init_customized_info(pf);
1389
1390         ret = i40e_init_ethtype_filter_list(dev);
1391         if (ret < 0)
1392                 goto err_init_ethtype_filter_list;
1393         ret = i40e_init_tunnel_filter_list(dev);
1394         if (ret < 0)
1395                 goto err_init_tunnel_filter_list;
1396         ret = i40e_init_fdir_filter_list(dev);
1397         if (ret < 0)
1398                 goto err_init_fdir_filter_list;
1399
1400         /* initialize queue region configuration */
1401         i40e_init_queue_region_conf(dev);
1402
1403         /* initialize rss configuration from rte_flow */
1404         memset(&pf->rss_info, 0,
1405                 sizeof(struct i40e_rte_flow_rss_conf));
1406
1407         return 0;
1408
1409 err_init_fdir_filter_list:
1410         rte_free(pf->tunnel.hash_table);
1411         rte_free(pf->tunnel.hash_map);
1412 err_init_tunnel_filter_list:
1413         rte_free(pf->ethertype.hash_table);
1414         rte_free(pf->ethertype.hash_map);
1415 err_init_ethtype_filter_list:
1416         rte_free(dev->data->mac_addrs);
1417 err_mac_alloc:
1418         i40e_vsi_release(pf->main_vsi);
1419 err_setup_pf_switch:
1420 err_get_mac_addr:
1421 err_configure_lan_hmc:
1422         (void)i40e_shutdown_lan_hmc(hw);
1423 err_init_lan_hmc:
1424         i40e_res_pool_destroy(&pf->msix_pool);
1425 err_msix_pool_init:
1426         i40e_res_pool_destroy(&pf->qp_pool);
1427 err_qp_pool_init:
1428 err_parameter_init:
1429 err_get_capabilities:
1430         (void)i40e_shutdown_adminq(hw);
1431
1432         return ret;
1433 }
1434
1435 static void
1436 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1437 {
1438         struct i40e_ethertype_filter *p_ethertype;
1439         struct i40e_ethertype_rule *ethertype_rule;
1440
1441         ethertype_rule = &pf->ethertype;
1442         /* Remove all ethertype filter rules and hash */
1443         if (ethertype_rule->hash_map)
1444                 rte_free(ethertype_rule->hash_map);
1445         if (ethertype_rule->hash_table)
1446                 rte_hash_free(ethertype_rule->hash_table);
1447
1448         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1449                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1450                              p_ethertype, rules);
1451                 rte_free(p_ethertype);
1452         }
1453 }
1454
1455 static void
1456 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1457 {
1458         struct i40e_tunnel_filter *p_tunnel;
1459         struct i40e_tunnel_rule *tunnel_rule;
1460
1461         tunnel_rule = &pf->tunnel;
1462         /* Remove all tunnel director rules and hash */
1463         if (tunnel_rule->hash_map)
1464                 rte_free(tunnel_rule->hash_map);
1465         if (tunnel_rule->hash_table)
1466                 rte_hash_free(tunnel_rule->hash_table);
1467
1468         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1469                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1470                 rte_free(p_tunnel);
1471         }
1472 }
1473
1474 static void
1475 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1476 {
1477         struct i40e_fdir_filter *p_fdir;
1478         struct i40e_fdir_info *fdir_info;
1479
1480         fdir_info = &pf->fdir;
1481         /* Remove all flow director rules and hash */
1482         if (fdir_info->hash_map)
1483                 rte_free(fdir_info->hash_map);
1484         if (fdir_info->hash_table)
1485                 rte_hash_free(fdir_info->hash_table);
1486
1487         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1488                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1489                 rte_free(p_fdir);
1490         }
1491 }
1492
1493 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1494 {
1495         /*
1496          * Disable flexible payload by default for
1497          * the corresponding L2/L3/L4 layers.
1498          */
1499         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1500         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1501         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1502         i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
1503 }
1504
1505 static int
1506 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1507 {
1508         struct i40e_pf *pf;
1509         struct rte_pci_device *pci_dev;
1510         struct rte_intr_handle *intr_handle;
1511         struct i40e_hw *hw;
1512         struct i40e_filter_control_settings settings;
1513         struct rte_flow *p_flow;
1514         int ret;
1515         uint8_t aq_fail = 0;
1516         int retries = 0;
1517
1518         PMD_INIT_FUNC_TRACE();
1519
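              /* Shared device state is owned by the primary process; a
               * secondary process must not tear it down.
               */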
1520         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1521                 return 0;
1522
1523         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1524         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1525         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1526         intr_handle = &pci_dev->intr_handle;
1527
1528         if (hw->adapter_stopped == 0)
1529                 i40e_dev_close(dev);
1530
1531         dev->dev_ops = NULL;
1532         dev->rx_pkt_burst = NULL;
1533         dev->tx_pkt_burst = NULL;
1534
1535         /* Clear PXE mode */
1536         i40e_clear_pxe_mode(hw);
1537
1538         /* Unconfigure filter control */
1539         memset(&settings, 0, sizeof(settings));
1540         ret = i40e_set_filter_control(hw, &settings);
1541         if (ret)
1542                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1543                                         ret);
1544
1545         /* Disable flow control */
1546         hw->fc.requested_mode = I40E_FC_NONE;
1547         i40e_set_fc(hw, &aq_fail, TRUE);
1548
1549         /* uninitialize pf host driver */
1550         i40e_pf_host_uninit(dev);
1551
1552         rte_free(dev->data->mac_addrs);
1553         dev->data->mac_addrs = NULL;
1554
1555         /* disable uio intr before callback unregister */
1556         rte_intr_disable(intr_handle);
1557
1558         /* unregister callback func to eal lib */
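              /* rte_intr_callback_unregister() returns -EAGAIN while the
               * callback is still being serviced, so retry a few times
               * before giving up.
               */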
1559         do {
1560                 ret = rte_intr_callback_unregister(intr_handle,
1561                                 i40e_dev_interrupt_handler, dev);
1562                 if (ret >= 0) {
1563                         break;
1564                 } else if (ret != -EAGAIN) {
1565                         PMD_INIT_LOG(ERR,
1566                                  "intr callback unregister failed: %d",
1567                                  ret);
1568                         return ret;
1569                 }
1570                 i40e_msec_delay(500);
1571         } while (retries++ < 5);
1572
1573         i40e_rm_ethtype_filter_list(pf);
1574         i40e_rm_tunnel_filter_list(pf);
1575         i40e_rm_fdir_filter_list(pf);
1576
1577         /* Remove all flows */
1578         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1579                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1580                 rte_free(p_flow);
1581         }
1582
1583         /* Remove all Traffic Manager configuration */
1584         i40e_tm_conf_uninit(dev);
1585
1586         return 0;
1587 }
1588
1589 static int
1590 i40e_dev_configure(struct rte_eth_dev *dev)
1591 {
1592         struct i40e_adapter *ad =
1593                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1594         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1595         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1596         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1597         int i, ret;
1598
1599         ret = i40e_dev_sync_phy_type(hw);
1600         if (ret)
1601                 return ret;
1602
1603         /* Initialize to TRUE. If any Rx queue doesn't meet the
1604          * bulk allocation or vector Rx preconditions, it will be reset.
1605          */
1606         ad->rx_bulk_alloc_allowed = true;
1607         ad->rx_vec_allowed = true;
1608         ad->tx_simple_allowed = true;
1609         ad->tx_vec_allowed = true;
1610
1611         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1612                 ret = i40e_fdir_setup(pf);
1613                 if (ret != I40E_SUCCESS) {
1614                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1615                         return -ENOTSUP;
1616                 }
1617                 ret = i40e_fdir_configure(dev);
1618                 if (ret < 0) {
1619                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1620                         goto err;
1621                 }
1622         } else
1623                 i40e_fdir_teardown(pf);
1624
1625         ret = i40e_dev_init_vlan(dev);
1626         if (ret < 0)
1627                 goto err;
1628
1629         /* VMDQ setup.
1630          *  VMDQ setup needs to be moved out of i40e_pf_config_mq_rx(), as
1631          *  VMDQ and RSS settings have different requirements.
1632          *  The general PMD call sequence is NIC init, configure,
1633          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
1634          *  the VSI that a specific queue belongs to when VMDQ is
1635          *  applicable, so the VMDQ setting has to be done before
1636          *  rx/tx_queue_setup(); this function is a good place for
1637          *  vmdq_setup. RSS setup needs the actual number of configured RX
1638          *  queues, which is only available after rx_queue_setup(), so
1639          *  dev_start() is a good place for the RSS setup.
1640          */
1641         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1642                 ret = i40e_vmdq_setup(dev);
1643                 if (ret)
1644                         goto err;
1645         }
1646
1647         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1648                 ret = i40e_dcb_setup(dev);
1649                 if (ret) {
1650                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1651                         goto err_dcb;
1652                 }
1653         }
1654
1655         TAILQ_INIT(&pf->flow_list);
1656
1657         return 0;
1658
1659 err_dcb:
1660         /* release VMDQ resources if they exist */
1661         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1662                 i40e_vsi_release(pf->vmdq[i].vsi);
1663                 pf->vmdq[i].vsi = NULL;
1664         }
1665         rte_free(pf->vmdq);
1666         pf->vmdq = NULL;
1667 err:
1668         /* release flow director resources if they exist */
1669         i40e_fdir_teardown(pf);
1670         return ret;
1671 }
1672
1673 void
1674 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1675 {
1676         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1677         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1678         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1679         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1680         uint16_t msix_vect = vsi->msix_intr;
1681         uint16_t i;
1682
1683         for (i = 0; i < vsi->nb_qps; i++) {
1684                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1685                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1686                 rte_wmb();
1687         }
1688
1689         if (vsi->type != I40E_VSI_SRIOV) {
1690                 if (!rte_intr_allow_others(intr_handle)) {
1691                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1692                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1693                         I40E_WRITE_REG(hw,
1694                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1695                                        0);
1696                 } else {
1697                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1698                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1699                         I40E_WRITE_REG(hw,
1700                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1701                                                        msix_vect - 1), 0);
1702                 }
1703         } else {
1704                 uint32_t reg;
1705                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1706                         vsi->user_param + (msix_vect - 1);
1707
1708                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1709                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1710         }
1711         I40E_WRITE_FLUSH(hw);
1712 }
1713
1714 static void
1715 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1716                        int base_queue, int nb_queue,
1717                        uint16_t itr_idx)
1718 {
1719         int i;
1720         uint32_t val;
1721         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1722         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1723
1724         /* Bind all RX queues to allocated MSIX interrupt */
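              /* The queues form a hardware linked list: each RQCTL entry
               * points to the next queue via NEXTQ_INDX, and the last entry
               * is terminated with NEXTQ_INDX_MASK below.
               */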
1725         for (i = 0; i < nb_queue; i++) {
1726                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1727                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1728                         ((base_queue + i + 1) <<
1729                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1730                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1731                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1732
1733                 if (i == nb_queue - 1)
1734                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1735                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1736         }
1737
1738         /* Write first RX queue to Link list register as the head element */
1739         if (vsi->type != I40E_VSI_SRIOV) {
1740                 uint16_t interval =
1741                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
1742                                                pf->support_multi_driver);
1743
1744                 if (msix_vect == I40E_MISC_VEC_ID) {
1745                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1746                                        (base_queue <<
1747                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1748                                        (0x0 <<
1749                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1750                         I40E_WRITE_REG(hw,
1751                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1752                                        interval);
1753                 } else {
1754                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1755                                        (base_queue <<
1756                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1757                                        (0x0 <<
1758                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1759                         I40E_WRITE_REG(hw,
1760                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1761                                                        msix_vect - 1),
1762                                        interval);
1763                 }
1764         } else {
1765                 uint32_t reg;
1766
1767                 if (msix_vect == I40E_MISC_VEC_ID) {
1768                         I40E_WRITE_REG(hw,
1769                                        I40E_VPINT_LNKLST0(vsi->user_param),
1770                                        (base_queue <<
1771                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1772                                        (0x0 <<
1773                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1774                 } else {
1775                         /* num_msix_vectors_vf includes IRQ0, which must be subtracted */
1776                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1777                                 vsi->user_param + (msix_vect - 1);
1778
1779                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1780                                        (base_queue <<
1781                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1782                                        (0x0 <<
1783                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1784                 }
1785         }
1786
1787         I40E_WRITE_FLUSH(hw);
1788 }
1789
1790 void
1791 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1792 {
1793         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1794         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1795         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1796         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1797         uint16_t msix_vect = vsi->msix_intr;
1798         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1799         uint16_t queue_idx = 0;
1800         int record = 0;
1801         int i;
1802
1803         for (i = 0; i < vsi->nb_qps; i++) {
1804                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1805                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1806         }
1807
1808         /* VF bind interrupt */
1809         if (vsi->type == I40E_VSI_SRIOV) {
1810                 __vsi_queues_bind_intr(vsi, msix_vect,
1811                                        vsi->base_queue, vsi->nb_qps,
1812                                        itr_idx);
1813                 return;
1814         }
1815
1816         /* PF & VMDq bind interrupt */
1817         if (rte_intr_dp_is_en(intr_handle)) {
1818                 if (vsi->type == I40E_VSI_MAIN) {
1819                         queue_idx = 0;
1820                         record = 1;
1821                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1822                         struct i40e_vsi *main_vsi =
1823                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1824                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1825                         record = 1;
1826                 }
1827         }
1828
1829         for (i = 0; i < vsi->nb_used_qps; i++) {
1830                 if (nb_msix <= 1) {
1831                         if (!rte_intr_allow_others(intr_handle))
1832                                 /* allow sharing MISC_VEC_ID */
1833                                 msix_vect = I40E_MISC_VEC_ID;
1834
1835                         /* not enough msix_vect, map all remaining queues to one */
1836                         __vsi_queues_bind_intr(vsi, msix_vect,
1837                                                vsi->base_queue + i,
1838                                                vsi->nb_used_qps - i,
1839                                                itr_idx);
1840                         for (; !!record && i < vsi->nb_used_qps; i++)
1841                                 intr_handle->intr_vec[queue_idx + i] =
1842                                         msix_vect;
1843                         break;
1844                 }
1845                 /* 1:1 queue/msix_vect mapping */
1846                 __vsi_queues_bind_intr(vsi, msix_vect,
1847                                        vsi->base_queue + i, 1,
1848                                        itr_idx);
1849                 if (!!record)
1850                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1851
1852                 msix_vect++;
1853                 nb_msix--;
1854         }
1855 }
1856
1857 static void
1858 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1859 {
1860         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1861         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1862         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1863         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1864         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1865         uint16_t msix_intr, i;
1866
1867         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1868                 for (i = 0; i < vsi->nb_msix; i++) {
1869                         msix_intr = vsi->msix_intr + i;
1870                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1871                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1872                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1873                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1874                 }
1875         else
1876                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1877                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1878                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1879                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1880
1881         I40E_WRITE_FLUSH(hw);
1882 }
1883
1884 static void
1885 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1886 {
1887         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1888         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1889         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1890         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1891         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1892         uint16_t msix_intr, i;
1893
1894         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1895                 for (i = 0; i < vsi->nb_msix; i++) {
1896                         msix_intr = vsi->msix_intr + i;
1897                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1898                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1899                 }
1900         else
1901                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1902                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1903
1904         I40E_WRITE_FLUSH(hw);
1905 }
1906
1907 static inline uint8_t
1908 i40e_parse_link_speeds(uint16_t link_speeds)
1909 {
1910         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1911
1912         if (link_speeds & ETH_LINK_SPEED_40G)
1913                 link_speed |= I40E_LINK_SPEED_40GB;
1914         if (link_speeds & ETH_LINK_SPEED_25G)
1915                 link_speed |= I40E_LINK_SPEED_25GB;
1916         if (link_speeds & ETH_LINK_SPEED_20G)
1917                 link_speed |= I40E_LINK_SPEED_20GB;
1918         if (link_speeds & ETH_LINK_SPEED_10G)
1919                 link_speed |= I40E_LINK_SPEED_10GB;
1920         if (link_speeds & ETH_LINK_SPEED_1G)
1921                 link_speed |= I40E_LINK_SPEED_1GB;
1922         if (link_speeds & ETH_LINK_SPEED_100M)
1923                 link_speed |= I40E_LINK_SPEED_100MB;
1924
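              /* For example, link_speeds = ETH_LINK_SPEED_10G |
               * ETH_LINK_SPEED_1G yields I40E_LINK_SPEED_10GB |
               * I40E_LINK_SPEED_1GB, advertising both rates.
               */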
1925         return link_speed;
1926 }
1927
1928 static int
1929 i40e_phy_conf_link(struct i40e_hw *hw,
1930                    uint8_t abilities,
1931                    uint8_t force_speed,
1932                    bool is_up)
1933 {
1934         enum i40e_status_code status;
1935         struct i40e_aq_get_phy_abilities_resp phy_ab;
1936         struct i40e_aq_set_phy_config phy_conf;
1937         enum i40e_aq_phy_type cnt;
1938         uint32_t phy_type_mask = 0;
1939
1940         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1941                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1943                         I40E_AQ_PHY_FLAG_LOW_POWER;
1944         const uint8_t advt = I40E_LINK_SPEED_40GB |
1945                         I40E_LINK_SPEED_25GB |
1946                         I40E_LINK_SPEED_10GB |
1947                         I40E_LINK_SPEED_1GB |
1948                         I40E_LINK_SPEED_100MB;
1949         int ret = -ENOTSUP;
1950
1952         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1953                                               NULL);
1954         if (status)
1955                 return ret;
1956
1957         /* If link already up, no need to set up again */
1958         if (is_up && phy_ab.phy_type != 0)
1959                 return I40E_SUCCESS;
1960
1961         memset(&phy_conf, 0, sizeof(phy_conf));
1962
1963         /* bits 0-2 use the values from get_phy_abilities_resp */
1964         abilities &= ~mask;
1965         abilities |= phy_ab.abilities & mask;
1966
1967         /* update abilities and speed */
1968         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1969                 phy_conf.link_speed = advt;
1970         else
1971                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
1972
1973         phy_conf.abilities = abilities;
1974
1977         /* To enable link, phy_type mask needs to include each type */
1978         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
1979                 phy_type_mask |= 1 << cnt;
1980
1981         /* use get_phy_abilities_resp value for the rest */
1982         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
1983         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
1984                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
1985                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
1986         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1987         phy_conf.eee_capability = phy_ab.eee_capability;
1988         phy_conf.eeer = phy_ab.eeer_val;
1989         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1990
1991         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1992                     phy_ab.abilities, phy_ab.link_speed);
1993         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1994                     phy_conf.abilities, phy_conf.link_speed);
1995
1996         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1997         if (status)
1998                 return ret;
1999
2000         return I40E_SUCCESS;
2001 }
2002
2003 static int
2004 i40e_apply_link_speed(struct rte_eth_dev *dev)
2005 {
2006         uint8_t speed;
2007         uint8_t abilities = 0;
2008         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2009         struct rte_eth_conf *conf = &dev->data->dev_conf;
2010
2011         speed = i40e_parse_link_speeds(conf->link_speeds);
2012         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2013         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
2014                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2015         abilities |= I40E_AQ_PHY_LINK_ENABLED;
2016
2017         return i40e_phy_conf_link(hw, abilities, speed, true);
2018 }
2019
2020 static int
2021 i40e_dev_start(struct rte_eth_dev *dev)
2022 {
2023         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2024         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2025         struct i40e_vsi *main_vsi = pf->main_vsi;
2026         int ret, i;
2027         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2028         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2029         uint32_t intr_vector = 0;
2030         struct i40e_vsi *vsi;
2031
2032         hw->adapter_stopped = 0;
2033
2034         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2035                 PMD_INIT_LOG(ERR,
2036                 "Invalid link_speeds for port %u, autonegotiation disabled",
2037                               dev->data->port_id);
2038                 return -EINVAL;
2039         }
2040
2041         rte_intr_disable(intr_handle);
2042
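              /* For Rx interrupt mode, allocate one event fd per Rx queue so
               * each queue interrupt can be serviced individually through
               * the EAL.
               */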
2043         if ((rte_intr_cap_multiple(intr_handle) ||
2044              !RTE_ETH_DEV_SRIOV(dev).active) &&
2045             dev->data->dev_conf.intr_conf.rxq != 0) {
2046                 intr_vector = dev->data->nb_rx_queues;
2047                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2048                 if (ret)
2049                         return ret;
2050         }
2051
2052         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2053                 intr_handle->intr_vec =
2054                         rte_zmalloc("intr_vec",
2055                                     dev->data->nb_rx_queues * sizeof(int),
2056                                     0);
2057                 if (!intr_handle->intr_vec) {
2058                         PMD_INIT_LOG(ERR,
2059                                 "Failed to allocate %d rx_queues intr_vec",
2060                                 dev->data->nb_rx_queues);
2061                         return -ENOMEM;
2062                 }
2063         }
2064
2065         /* Initialize VSI */
2066         ret = i40e_dev_rxtx_init(pf);
2067         if (ret != I40E_SUCCESS) {
2068                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2069                 goto err_up;
2070         }
2071
2072         /* Map queues with MSIX interrupt */
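              /* The main VSI takes the Rx queues not claimed by the VMDq
               * pools (nb_cfg_vmdq_vsi pools with QUEUE_NUM_PER_VM queues
               * each).
               */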
2073         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2074                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2075         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2076         i40e_vsi_enable_queues_intr(main_vsi);
2077
2078         /* Map VMDQ VSI queues with MSIX interrupt */
2079         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2080                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2081                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2082                                           I40E_ITR_INDEX_DEFAULT);
2083                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2084         }
2085
2086         /* enable FDIR MSIX interrupt */
2087         if (pf->fdir.fdir_vsi) {
2088                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2089                                           I40E_ITR_INDEX_NONE);
2090                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2091         }
2092
2093         /* Enable all queues which have been configured */
2094         ret = i40e_dev_switch_queues(pf, TRUE);
2095         if (ret != I40E_SUCCESS) {
2096                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2097                 goto err_up;
2098         }
2099
2100         /* Enable receiving broadcast packets */
2101         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2102         if (ret != I40E_SUCCESS)
2103                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2104
2105         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2106                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2107                                                 true, NULL);
2108                 if (ret != I40E_SUCCESS)
2109                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2110         }
2111
2112         /* Enable the VLAN promiscuous mode. */
2113         if (pf->vfs) {
2114                 for (i = 0; i < pf->vf_num; i++) {
2115                         vsi = pf->vfs[i].vsi;
2116                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2117                                                      true, NULL);
2118                 }
2119         }
2120
2121         /* Enable mac loopback mode */
2122         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2123             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2124                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2125                 if (ret != I40E_SUCCESS) {
2126                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2127                         goto err_up;
2128                 }
2129         }
2130
2131         /* Apply link configure */
2132         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2133                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2134                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2135                                 ETH_LINK_SPEED_40G)) {
2136                 PMD_DRV_LOG(ERR, "Invalid link setting");
2137                 goto err_up;
2138         }
2139         ret = i40e_apply_link_speed(dev);
2140         if (I40E_SUCCESS != ret) {
2141                 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2142                 goto err_up;
2143         }
2144
2145         if (!rte_intr_allow_others(intr_handle)) {
2146                 rte_intr_callback_unregister(intr_handle,
2147                                              i40e_dev_interrupt_handler,
2148                                              (void *)dev);
2149                 /* configure and enable device interrupt */
2150                 i40e_pf_config_irq0(hw, FALSE);
2151                 i40e_pf_enable_irq0(hw);
2152
2153                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2154                         PMD_INIT_LOG(INFO,
2155                                 "lsc won't be enabled because intr multiplexing is unavailable");
2156         } else {
2157                 ret = i40e_aq_set_phy_int_mask(hw,
2158                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2159                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2160                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2161                 if (ret != I40E_SUCCESS)
2162                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2163
2164                 /* Call the get_link_info aq command to enable/disable LSE */
2165                 i40e_dev_link_update(dev, 0);
2166         }
2167
2168         /* enable uio intr after callback registration */
2169         rte_intr_enable(intr_handle);
2170
2171         i40e_filter_restore(pf);
2172
2173         if (pf->tm_conf.root && !pf->tm_conf.committed)
2174                 PMD_DRV_LOG(WARNING,
2175                             "please call hierarchy_commit() "
2176                             "before starting the port");
2177
2178         return I40E_SUCCESS;
2179
2180 err_up:
2181         i40e_dev_switch_queues(pf, FALSE);
2182         i40e_dev_clear_queues(dev);
2183
2184         return ret;
2185 }
2186
2187 static void
2188 i40e_dev_stop(struct rte_eth_dev *dev)
2189 {
2190         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2191         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2192         struct i40e_vsi *main_vsi = pf->main_vsi;
2193         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2194         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2195         int i;
2196
2197         if (hw->adapter_stopped == 1)
2198                 return;
2199         /* Disable all queues */
2200         i40e_dev_switch_queues(pf, FALSE);
2201
2202         /* un-map queues with interrupt registers */
2203         i40e_vsi_disable_queues_intr(main_vsi);
2204         i40e_vsi_queues_unbind_intr(main_vsi);
2205
2206         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2207                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2208                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2209         }
2210
2211         if (pf->fdir.fdir_vsi) {
2212                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2213                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2214         }
2215         /* Clear all queues and release memory */
2216         i40e_dev_clear_queues(dev);
2217
2218         /* Set link down */
2219         i40e_dev_set_link_down(dev);
2220
2221         if (!rte_intr_allow_others(intr_handle))
2222                 /* restore the default interrupt handler */
2223                 rte_intr_callback_register(intr_handle,
2224                                            i40e_dev_interrupt_handler,
2225                                            (void *)dev);
2226
2227         /* Clean datapath event and queue/vec mapping */
2228         rte_intr_efd_disable(intr_handle);
2229         if (intr_handle->intr_vec) {
2230                 rte_free(intr_handle->intr_vec);
2231                 intr_handle->intr_vec = NULL;
2232         }
2233
2234         /* reset hierarchy commit */
2235         pf->tm_conf.committed = false;
2236
2237         hw->adapter_stopped = 1;
2238 }
2239
2240 static void
2241 i40e_dev_close(struct rte_eth_dev *dev)
2242 {
2243         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2244         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2245         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2246         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2247         struct i40e_mirror_rule *p_mirror;
2248         uint32_t reg;
2249         int i;
2250         int ret;
2251
2252         PMD_INIT_FUNC_TRACE();
2253
2254         i40e_dev_stop(dev);
2255
2256         /* Remove all mirror rules */
2257         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2258                 ret = i40e_aq_del_mirror_rule(hw,
2259                                               pf->main_vsi->veb->seid,
2260                                               p_mirror->rule_type,
2261                                               p_mirror->entries,
2262                                               p_mirror->num_entries,
2263                                               p_mirror->id);
2264                 if (ret < 0)
2265                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2266                                     "status = %d, aq_err = %d.", ret,
2267                                     hw->aq.asq_last_status);
2268
2269                 /* remove mirror software resource anyway */
2270                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2271                 rte_free(p_mirror);
2272                 pf->nb_mirror_rule--;
2273         }
2274
2275         i40e_dev_free_queues(dev);
2276
2277         /* Disable interrupt */
2278         i40e_pf_disable_irq0(hw);
2279         rte_intr_disable(intr_handle);
2280
2281         /* shutdown and destroy the HMC */
2282         i40e_shutdown_lan_hmc(hw);
2283
2284         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2285                 i40e_vsi_release(pf->vmdq[i].vsi);
2286                 pf->vmdq[i].vsi = NULL;
2287         }
2288         rte_free(pf->vmdq);
2289         pf->vmdq = NULL;
2290
2291         /* release all the existing VSIs and VEBs */
2292         i40e_fdir_teardown(pf);
2293         i40e_vsi_release(pf->main_vsi);
2294
2295         /* shutdown the adminq */
2296         i40e_aq_queue_shutdown(hw, true);
2297         i40e_shutdown_adminq(hw);
2298
2299         i40e_res_pool_destroy(&pf->qp_pool);
2300         i40e_res_pool_destroy(&pf->msix_pool);
2301
2302         /* Disable flexible payload in global configuration */
2303         if (!pf->support_multi_driver)
2304                 i40e_flex_payload_reg_set_default(hw);
2305
2306         /* force a PF reset to clean anything leftover */
2307         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2308         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2309                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2310         I40E_WRITE_FLUSH(hw);
2311 }
2312
2313 /*
2314  * Reset PF device only to re-initialize resources in PMD layer
2315  */
2316 static int
2317 i40e_dev_reset(struct rte_eth_dev *dev)
2318 {
2319         int ret;
2320
2321         /* When a DPDK PMD PF begins to reset the PF port, it should notify
2322          * all of its VFs so that they stay aligned with it. The notification
2323          * mechanism is PMD-specific, and for the i40e PF rather complex.
2324          * To avoid unexpected behavior in a VF, reset of a PF with SR-IOV
2325          * activated is currently not supported. It might be supported later.
2326          */
2327         if (dev->data->sriov.active)
2328                 return -ENOTSUP;
2329
2330         ret = eth_i40e_dev_uninit(dev);
2331         if (ret)
2332                 return ret;
2333
2334         ret = eth_i40e_dev_init(dev);
2335
2336         return ret;
2337 }
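     /*
      * A minimal usage sketch from the application side (an assumption for
      * illustration, not part of this driver): the reset above is reached
      * through the generic ethdev API.
      *
      *     if (rte_eth_dev_reset(port_id) == 0) {
      *             rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
      *             rte_eth_dev_start(port_id);
      *     }
      */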
2338
2339 static void
2340 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2341 {
2342         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2343         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2344         struct i40e_vsi *vsi = pf->main_vsi;
2345         int status;
2346
2347         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2348                                                      true, NULL, true);
2349         if (status != I40E_SUCCESS)
2350                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2351
2352         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2353                                                         TRUE, NULL);
2354         if (status != I40E_SUCCESS)
2355                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2357 }
2358
2359 static void
2360 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2361 {
2362         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2363         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2364         struct i40e_vsi *vsi = pf->main_vsi;
2365         int status;
2366
2367         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2368                                                      false, NULL, true);
2369         if (status != I40E_SUCCESS)
2370                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2371
2372         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2373                                                         false, NULL);
2374         if (status != I40E_SUCCESS)
2375                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2376 }
2377
2378 static void
2379 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2380 {
2381         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2382         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2383         struct i40e_vsi *vsi = pf->main_vsi;
2384         int ret;
2385
2386         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2387         if (ret != I40E_SUCCESS)
2388                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2389 }
2390
2391 static void
2392 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2393 {
2394         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2395         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2396         struct i40e_vsi *vsi = pf->main_vsi;
2397         int ret;
2398
2399         if (dev->data->promiscuous == 1)
2400                 return; /* must remain in all_multicast mode */
2401
2402         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2403                                 vsi->seid, FALSE, NULL);
2404         if (ret != I40E_SUCCESS)
2405                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2406 }
2407
2408 /*
2409  * Set device link up.
2410  */
2411 static int
2412 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2413 {
2414         /* re-apply link speed setting */
2415         return i40e_apply_link_speed(dev);
2416 }
2417
2418 /*
2419  * Set device link down.
2420  */
2421 static int
2422 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2423 {
2424         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2425         uint8_t abilities = 0;
2426         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2427
2428         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2429         return i40e_phy_conf_link(hw, abilities, speed, false);
2430 }
2431
2432 static __rte_always_inline void
2433 update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
2434 {
2435 /* Link status registers and values */
2436 #define I40E_PRTMAC_LINKSTA             0x001E2420
2437 #define I40E_REG_LINK_UP                0x40000080
2438 #define I40E_PRTMAC_MACC                0x001E24E0
2439 #define I40E_REG_MACC_25GB              0x00020000
2440 #define I40E_REG_SPEED_MASK             0x38000000
2441 #define I40E_REG_SPEED_100MB            0x00000000
2442 #define I40E_REG_SPEED_1GB              0x08000000
2443 #define I40E_REG_SPEED_10GB             0x10000000
2444 #define I40E_REG_SPEED_20GB             0x20000000
2445 #define I40E_REG_SPEED_25_40GB          0x18000000
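              /* 25G and 40G share the 0x18000000 encoding; the MACC register
               * is read below to tell them apart.
               */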
2446         uint32_t link_speed;
2447         uint32_t reg_val;
2448
2449         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2450         link_speed = reg_val & I40E_REG_SPEED_MASK;
2451         reg_val &= I40E_REG_LINK_UP;
2452         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2453
2454         if (unlikely(link->link_status == 0))
2455                 return;
2456
2457         /* Parse the link status */
2458         switch (link_speed) {
2459         case I40E_REG_SPEED_100MB:
2460                 link->link_speed = ETH_SPEED_NUM_100M;
2461                 break;
2462         case I40E_REG_SPEED_1GB:
2463                 link->link_speed = ETH_SPEED_NUM_1G;
2464                 break;
2465         case I40E_REG_SPEED_10GB:
2466                 link->link_speed = ETH_SPEED_NUM_10G;
2467                 break;
2468         case I40E_REG_SPEED_20GB:
2469                 link->link_speed = ETH_SPEED_NUM_20G;
2470                 break;
2471         case I40E_REG_SPEED_25_40GB:
2472                 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2473
2474                 if (reg_val & I40E_REG_MACC_25GB)
2475                         link->link_speed = ETH_SPEED_NUM_25G;
2476                 else
2477                         link->link_speed = ETH_SPEED_NUM_40G;
2478
2479                 break;
2480         default:
2481                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2482                 break;
2483         }
2484 }
2485
2486 static __rte_always_inline void
2487 update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
2488         bool enable_lse)
2489 {
2490 #define CHECK_INTERVAL             100  /* 100ms */
2491 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2492         uint32_t rep_cnt = MAX_REPEAT_TIME;
2493         struct i40e_link_status link_status;
2494         int status;
2495
2496         memset(&link_status, 0, sizeof(link_status));
2497
2498         do {
2499                 memset(&link_status, 0, sizeof(link_status));
2500
2501                 /* Get link status information from hardware */
2502                 status = i40e_aq_get_link_info(hw, enable_lse,
2503                                                 &link_status, NULL);
2504                 if (unlikely(status != I40E_SUCCESS)) {
2505                         link->link_speed = ETH_SPEED_NUM_100M;
2506                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2507                         PMD_DRV_LOG(ERR, "Failed to get link info");
2508                         return;
2509                 }
2510
2511                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2512                 if (unlikely(link->link_status != 0))
2513                         break;
2514
2515                 rte_delay_ms(CHECK_INTERVAL);
2516         } while (--rep_cnt);
2517
2518         /* Parse the link status */
2519         switch (link_status.link_speed) {
2520         case I40E_LINK_SPEED_100MB:
2521                 link->link_speed = ETH_SPEED_NUM_100M;
2522                 break;
2523         case I40E_LINK_SPEED_1GB:
2524                 link->link_speed = ETH_SPEED_NUM_1G;
2525                 break;
2526         case I40E_LINK_SPEED_10GB:
2527                 link->link_speed = ETH_SPEED_NUM_10G;
2528                 break;
2529         case I40E_LINK_SPEED_20GB:
2530                 link->link_speed = ETH_SPEED_NUM_20G;
2531                 break;
2532         case I40E_LINK_SPEED_25GB:
2533                 link->link_speed = ETH_SPEED_NUM_25G;
2534                 break;
2535         case I40E_LINK_SPEED_40GB:
2536                 link->link_speed = ETH_SPEED_NUM_40G;
2537                 break;
2538         default:
2539                 link->link_speed = ETH_SPEED_NUM_100M;
2540                 break;
2541         }
2542 }
2543
2544 int
2545 i40e_dev_link_update(struct rte_eth_dev *dev,
2546                      int wait_to_complete)
2547 {
2548         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2549         struct rte_eth_link link;
2550         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2551         int ret;
2552
2553         memset(&link, 0, sizeof(link));
2554
2555         /* i40e uses full duplex only */
2556         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2557         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2558                         ETH_LINK_SPEED_FIXED);
2559
2560         if (!wait_to_complete)
2561                 update_link_no_wait(hw, &link);
2562         else
2563                 update_link_wait(hw, &link, enable_lse);
2564
2565         ret = rte_eth_linkstatus_set(dev, &link);
2566         i40e_notify_all_vfs_link_status(dev);
2567
2568         return ret;
2569 }
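     /*
      * Sketch (assumption, application side): this is normally reached
      * through the ethdev layer rather than called directly.
      *
      *     struct rte_eth_link link;
      *     rte_eth_link_get_nowait(port_id, &link);  // wait_to_complete == 0
      *     rte_eth_link_get(port_id, &link);         // wait_to_complete == 1
      */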
2570
2571 /* Get all the statistics of a VSI */
2572 void
2573 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2574 {
2575         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2576         struct i40e_eth_stats *nes = &vsi->eth_stats;
2577         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2578         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2579
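              /* i40e_stat_update_48() merges the split high/low registers
               * into one 48-bit counter and, once offset_loaded is set,
               * reports the delta against the stored offset so that register
               * wraparound is handled.
               */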
2580         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2581                             vsi->offset_loaded, &oes->rx_bytes,
2582                             &nes->rx_bytes);
2583         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2584                             vsi->offset_loaded, &oes->rx_unicast,
2585                             &nes->rx_unicast);
2586         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2587                             vsi->offset_loaded, &oes->rx_multicast,
2588                             &nes->rx_multicast);
2589         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2590                             vsi->offset_loaded, &oes->rx_broadcast,
2591                             &nes->rx_broadcast);
2592         /* exclude CRC bytes */
2593         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2594                 nes->rx_broadcast) * ETHER_CRC_LEN;
2595
2596         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2597                             &oes->rx_discards, &nes->rx_discards);
2598         /* GLV_REPC not supported */
2599         /* GLV_RMPC not supported */
2600         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2601                             &oes->rx_unknown_protocol,
2602                             &nes->rx_unknown_protocol);
2603         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2604                             vsi->offset_loaded, &oes->tx_bytes,
2605                             &nes->tx_bytes);
2606         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2607                             vsi->offset_loaded, &oes->tx_unicast,
2608                             &nes->tx_unicast);
2609         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2610                             vsi->offset_loaded, &oes->tx_multicast,
2611                             &nes->tx_multicast);
2612         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2613                             vsi->offset_loaded,  &oes->tx_broadcast,
2614                             &nes->tx_broadcast);
2615         /* GLV_TDPC not supported */
2616         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2617                             &oes->tx_errors, &nes->tx_errors);
2618         vsi->offset_loaded = true;
2619
2620         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2621                     vsi->vsi_id);
2622         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2623         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2624         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2625         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2626         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2627         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2628                     nes->rx_unknown_protocol);
2629         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2630         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2631         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2632         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2633         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2634         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2635         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2636                     vsi->vsi_id);
2637 }
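
/*
 * Illustrative sketch (an assumption about the mechanism, not the
 * driver's own code): what each i40e_stat_update_48() call above
 * conceptually does. A 48-bit counter is split across a low 32-bit
 * register and the low 16 bits of a high register; the first read is
 * recorded as an offset, and later reads report the delta, allowing for
 * one wrap of the 48-bit space.
 */
static __rte_unused uint64_t
example_stat_delta_48(uint32_t lo, uint32_t hi,
                      bool offset_loaded, uint64_t *offset)
{
        uint64_t new_data = (uint64_t)lo | (((uint64_t)hi & 0xFFFF) << 32);

        if (!offset_loaded)
                *offset = new_data; /* first read becomes the baseline */

        if (new_data >= *offset)
                return new_data - *offset;
        /* the 48-bit counter wrapped once since the offset was taken */
        return (new_data + ((uint64_t)1 << 48)) - *offset;
}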
2638
2639 static void
2640 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2641 {
2642         unsigned int i;
2643         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2644         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2645
2646         /* Get rx/tx bytes of internal transfer packets */
2647         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2648                         I40E_GLV_GORCL(hw->port),
2649                         pf->offset_loaded,
2650                         &pf->internal_stats_offset.rx_bytes,
2651                         &pf->internal_stats.rx_bytes);
2652
2653         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2654                         I40E_GLV_GOTCL(hw->port),
2655                         pf->offset_loaded,
2656                         &pf->internal_stats_offset.tx_bytes,
2657                         &pf->internal_stats.tx_bytes);
2658         /* Get total internal rx packet count */
2659         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2660                             I40E_GLV_UPRCL(hw->port),
2661                             pf->offset_loaded,
2662                             &pf->internal_stats_offset.rx_unicast,
2663                             &pf->internal_stats.rx_unicast);
2664         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2665                             I40E_GLV_MPRCL(hw->port),
2666                             pf->offset_loaded,
2667                             &pf->internal_stats_offset.rx_multicast,
2668                             &pf->internal_stats.rx_multicast);
2669         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2670                             I40E_GLV_BPRCL(hw->port),
2671                             pf->offset_loaded,
2672                             &pf->internal_stats_offset.rx_broadcast,
2673                             &pf->internal_stats.rx_broadcast);
2674         /* Get total internal tx packet count */
2675         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2676                             I40E_GLV_UPTCL(hw->port),
2677                             pf->offset_loaded,
2678                             &pf->internal_stats_offset.tx_unicast,
2679                             &pf->internal_stats.tx_unicast);
2680         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2681                             I40E_GLV_MPTCL(hw->port),
2682                             pf->offset_loaded,
2683                             &pf->internal_stats_offset.tx_multicast,
2684                             &pf->internal_stats.tx_multicast);
2685         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2686                             I40E_GLV_BPTCL(hw->port),
2687                             pf->offset_loaded,
2688                             &pf->internal_stats_offset.tx_broadcast,
2689                             &pf->internal_stats.tx_broadcast);
2690
2691         /* exclude CRC size */
2692         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2693                 pf->internal_stats.rx_multicast +
2694                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2695
2696         /* Get statistics of struct i40e_eth_stats */
2697         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2698                             I40E_GLPRT_GORCL(hw->port),
2699                             pf->offset_loaded, &os->eth.rx_bytes,
2700                             &ns->eth.rx_bytes);
2701         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2702                             I40E_GLPRT_UPRCL(hw->port),
2703                             pf->offset_loaded, &os->eth.rx_unicast,
2704                             &ns->eth.rx_unicast);
2705         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2706                             I40E_GLPRT_MPRCL(hw->port),
2707                             pf->offset_loaded, &os->eth.rx_multicast,
2708                             &ns->eth.rx_multicast);
2709         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2710                             I40E_GLPRT_BPRCL(hw->port),
2711                             pf->offset_loaded, &os->eth.rx_broadcast,
2712                             &ns->eth.rx_broadcast);
2713         /* Workaround: CRC size should not be included in byte statistics,
2714          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2715          */
2716         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2717                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2718
2719         /* Exclude internal rx bytes.
2720          * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
2721          * before I40E_GLPRT_GORC[H/L], so there is a small window that can
2722          * cause a negative value.
2723          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2724          */
2725         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2726                 ns->eth.rx_bytes = 0;
2727         else
2728                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2729
2730         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2731                 ns->eth.rx_unicast = 0;
2732         else
2733                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2734
2735         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2736                 ns->eth.rx_multicast = 0;
2737         else
2738                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2739
2740         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2741                 ns->eth.rx_broadcast = 0;
2742         else
2743                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2744
2745         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2746                             pf->offset_loaded, &os->eth.rx_discards,
2747                             &ns->eth.rx_discards);
2748         /* GLPRT_REPC not supported */
2749         /* GLPRT_RMPC not supported */
2750         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2751                             pf->offset_loaded,
2752                             &os->eth.rx_unknown_protocol,
2753                             &ns->eth.rx_unknown_protocol);
2754         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2755                             I40E_GLPRT_GOTCL(hw->port),
2756                             pf->offset_loaded, &os->eth.tx_bytes,
2757                             &ns->eth.tx_bytes);
2758         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2759                             I40E_GLPRT_UPTCL(hw->port),
2760                             pf->offset_loaded, &os->eth.tx_unicast,
2761                             &ns->eth.tx_unicast);
2762         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2763                             I40E_GLPRT_MPTCL(hw->port),
2764                             pf->offset_loaded, &os->eth.tx_multicast,
2765                             &ns->eth.tx_multicast);
2766         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2767                             I40E_GLPRT_BPTCL(hw->port),
2768                             pf->offset_loaded, &os->eth.tx_broadcast,
2769                             &ns->eth.tx_broadcast);
2770         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2771                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2772
2773         /* Exclude internal tx bytes.
2774          * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated
2775          * before I40E_GLPRT_GOTC[H/L], so there is a small window that can
2776          * cause a negative value.
2777          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2778          */
2779         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2780                 ns->eth.tx_bytes = 0;
2781         else
2782                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2783
2784         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2785                 ns->eth.tx_unicast = 0;
2786         else
2787                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2788
2789         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2790                 ns->eth.tx_multicast = 0;
2791         else
2792                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2793
2794         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2795                 ns->eth.tx_broadcast = 0;
2796         else
2797                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2798
2799         /* GLPRT_TEPC not supported */
2800
2801         /* additional port specific stats */
2802         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2803                             pf->offset_loaded, &os->tx_dropped_link_down,
2804                             &ns->tx_dropped_link_down);
2805         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2806                             pf->offset_loaded, &os->crc_errors,
2807                             &ns->crc_errors);
2808         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2809                             pf->offset_loaded, &os->illegal_bytes,
2810                             &ns->illegal_bytes);
2811         /* GLPRT_ERRBC not supported */
2812         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2813                             pf->offset_loaded, &os->mac_local_faults,
2814                             &ns->mac_local_faults);
2815         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2816                             pf->offset_loaded, &os->mac_remote_faults,
2817                             &ns->mac_remote_faults);
2818         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2819                             pf->offset_loaded, &os->rx_length_errors,
2820                             &ns->rx_length_errors);
2821         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2822                             pf->offset_loaded, &os->link_xon_rx,
2823                             &ns->link_xon_rx);
2824         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2825                             pf->offset_loaded, &os->link_xoff_rx,
2826                             &ns->link_xoff_rx);
2827         for (i = 0; i < 8; i++) {
2828                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2829                                     pf->offset_loaded,
2830                                     &os->priority_xon_rx[i],
2831                                     &ns->priority_xon_rx[i]);
2832                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2833                                     pf->offset_loaded,
2834                                     &os->priority_xoff_rx[i],
2835                                     &ns->priority_xoff_rx[i]);
2836         }
2837         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2838                             pf->offset_loaded, &os->link_xon_tx,
2839                             &ns->link_xon_tx);
2840         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2841                             pf->offset_loaded, &os->link_xoff_tx,
2842                             &ns->link_xoff_tx);
2843         for (i = 0; i < 8; i++) {
2844                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2845                                     pf->offset_loaded,
2846                                     &os->priority_xon_tx[i],
2847                                     &ns->priority_xon_tx[i]);
2848                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2849                                     pf->offset_loaded,
2850                                     &os->priority_xoff_tx[i],
2851                                     &ns->priority_xoff_tx[i]);
2852                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2853                                     pf->offset_loaded,
2854                                     &os->priority_xon_2_xoff[i],
2855                                     &ns->priority_xon_2_xoff[i]);
2856         }
2857         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2858                             I40E_GLPRT_PRC64L(hw->port),
2859                             pf->offset_loaded, &os->rx_size_64,
2860                             &ns->rx_size_64);
2861         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2862                             I40E_GLPRT_PRC127L(hw->port),
2863                             pf->offset_loaded, &os->rx_size_127,
2864                             &ns->rx_size_127);
2865         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2866                             I40E_GLPRT_PRC255L(hw->port),
2867                             pf->offset_loaded, &os->rx_size_255,
2868                             &ns->rx_size_255);
2869         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2870                             I40E_GLPRT_PRC511L(hw->port),
2871                             pf->offset_loaded, &os->rx_size_511,
2872                             &ns->rx_size_511);
2873         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2874                             I40E_GLPRT_PRC1023L(hw->port),
2875                             pf->offset_loaded, &os->rx_size_1023,
2876                             &ns->rx_size_1023);
2877         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2878                             I40E_GLPRT_PRC1522L(hw->port),
2879                             pf->offset_loaded, &os->rx_size_1522,
2880                             &ns->rx_size_1522);
2881         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2882                             I40E_GLPRT_PRC9522L(hw->port),
2883                             pf->offset_loaded, &os->rx_size_big,
2884                             &ns->rx_size_big);
2885         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2886                             pf->offset_loaded, &os->rx_undersize,
2887                             &ns->rx_undersize);
2888         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2889                             pf->offset_loaded, &os->rx_fragments,
2890                             &ns->rx_fragments);
2891         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2892                             pf->offset_loaded, &os->rx_oversize,
2893                             &ns->rx_oversize);
2894         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2895                             pf->offset_loaded, &os->rx_jabber,
2896                             &ns->rx_jabber);
2897         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2898                             I40E_GLPRT_PTC64L(hw->port),
2899                             pf->offset_loaded, &os->tx_size_64,
2900                             &ns->tx_size_64);
2901         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2902                             I40E_GLPRT_PTC127L(hw->port),
2903                             pf->offset_loaded, &os->tx_size_127,
2904                             &ns->tx_size_127);
2905         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2906                             I40E_GLPRT_PTC255L(hw->port),
2907                             pf->offset_loaded, &os->tx_size_255,
2908                             &ns->tx_size_255);
2909         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2910                             I40E_GLPRT_PTC511L(hw->port),
2911                             pf->offset_loaded, &os->tx_size_511,
2912                             &ns->tx_size_511);
2913         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2914                             I40E_GLPRT_PTC1023L(hw->port),
2915                             pf->offset_loaded, &os->tx_size_1023,
2916                             &ns->tx_size_1023);
2917         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2918                             I40E_GLPRT_PTC1522L(hw->port),
2919                             pf->offset_loaded, &os->tx_size_1522,
2920                             &ns->tx_size_1522);
2921         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2922                             I40E_GLPRT_PTC9522L(hw->port),
2923                             pf->offset_loaded, &os->tx_size_big,
2924                             &ns->tx_size_big);
2925         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2926                            pf->offset_loaded,
2927                            &os->fd_sb_match, &ns->fd_sb_match);
2928         /* GLPRT_MSPDC not supported */
2929         /* GLPRT_XEC not supported */
2930
2931         pf->offset_loaded = true;
2932
2933         if (pf->main_vsi)
2934                 i40e_update_vsi_stats(pf->main_vsi);
2935 }
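
/*
 * The guarded subtractions above all implement the same saturating
 * "x = (x < y) ? 0 : x - y" on uint64_t, protecting against the register
 * update window described in the workaround comments. A hypothetical
 * helper (not used by the driver) capturing the pattern:
 */
static __rte_unused uint64_t
example_sub_saturate_u64(uint64_t x, uint64_t y)
{
        return (x < y) ? 0 : (x - y);
}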
2936
2937 /* Get all statistics of a port */
2938 static int
2939 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2940 {
2941         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2942         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2943         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2944         unsigned i;
2945
2946         /* Read the hardware stats registers; this updates the values in pf->stats */
2947         i40e_read_stats_registers(pf, hw);
2948
2949         stats->ipackets = ns->eth.rx_unicast +
2950                         ns->eth.rx_multicast +
2951                         ns->eth.rx_broadcast -
2952                         ns->eth.rx_discards -
2953                         pf->main_vsi->eth_stats.rx_discards;
2954         stats->opackets = ns->eth.tx_unicast +
2955                         ns->eth.tx_multicast +
2956                         ns->eth.tx_broadcast;
2957         stats->ibytes   = ns->eth.rx_bytes;
2958         stats->obytes   = ns->eth.tx_bytes;
2959         stats->oerrors  = ns->eth.tx_errors +
2960                         pf->main_vsi->eth_stats.tx_errors;
2961
2962         /* Rx Errors */
2963         stats->imissed  = ns->eth.rx_discards +
2964                         pf->main_vsi->eth_stats.rx_discards;
2965         stats->ierrors  = ns->crc_errors +
2966                         ns->rx_length_errors + ns->rx_undersize +
2967                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2968
2969         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2970         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2971         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2972         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2973         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2974         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2975         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2976                     ns->eth.rx_unknown_protocol);
2977         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2978         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2979         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2980         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2981         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2982         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2983
2984         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2985                     ns->tx_dropped_link_down);
2986         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2987         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2988                     ns->illegal_bytes);
2989         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2990         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2991                     ns->mac_local_faults);
2992         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2993                     ns->mac_remote_faults);
2994         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2995                     ns->rx_length_errors);
2996         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2997         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2998         for (i = 0; i < 8; i++) {
2999                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3000                                 i, ns->priority_xon_rx[i]);
3001                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3002                                 i, ns->priority_xoff_rx[i]);
3003         }
3004         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3005         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3006         for (i = 0; i < 8; i++) {
3007                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3008                                 i, ns->priority_xon_tx[i]);
3009                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3010                                 i, ns->priority_xoff_tx[i]);
3011                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3012                                 i, ns->priority_xon_2_xoff[i]);
3013         }
3014         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3015         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3016         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3017         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3018         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3019         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3020         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3021         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3022         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3023         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3024         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3025         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3026         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3027         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3028         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3029         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3030         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3031         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3032         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3033                         ns->mac_short_packet_dropped);
3034         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3035                     ns->checksum_error);
3036         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3037         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3038         return 0;
3039 }
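
/*
 * App-side sketch (illustrative; port_id is a placeholder): the counters
 * assembled above are retrieved through the generic ethdev stats API.
 */
static __rte_unused void
example_print_basic_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) == 0)
                printf("port %u: rx %"PRIu64" pkts, tx %"PRIu64" pkts, "
                       "%"PRIu64" rx errors\n", port_id,
                       stats.ipackets, stats.opackets, stats.ierrors);
}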
3040
3041 /* Reset the statistics */
3042 static void
3043 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3044 {
3045         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3046         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3047
3048         /* Mark PF and VSI stats to update the offset, aka "reset" */
3049         pf->offset_loaded = false;
3050         if (pf->main_vsi)
3051                 pf->main_vsi->offset_loaded = false;
3052
3053         /* Read the stats; this loads the current register values into the offsets */
3054         i40e_read_stats_registers(pf, hw);
3055 }
3056
3057 static uint32_t
3058 i40e_xstats_calc_num(void)
3059 {
3060         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3061                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3062                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3063 }
3064
3065 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3066                                      struct rte_eth_xstat_name *xstats_names,
3067                                      __rte_unused unsigned limit)
3068 {
3069         unsigned count = 0;
3070         unsigned i, prio;
3071
3072         if (xstats_names == NULL)
3073                 return i40e_xstats_calc_num();
3074
3075         /* Note: limit checked in rte_eth_xstats_names() */
3076
3077         /* Get stats from i40e_eth_stats struct */
3078         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3079                 snprintf(xstats_names[count].name,
3080                          sizeof(xstats_names[count].name),
3081                          "%s", rte_i40e_stats_strings[i].name);
3082                 count++;
3083         }
3084
3085         /* Get individual stats from the i40e_hw_port struct */
3086         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3087                 snprintf(xstats_names[count].name,
3088                         sizeof(xstats_names[count].name),
3089                          "%s", rte_i40e_hw_port_strings[i].name);
3090                 count++;
3091         }
3092
3093         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3094                 for (prio = 0; prio < 8; prio++) {
3095                         snprintf(xstats_names[count].name,
3096                                  sizeof(xstats_names[count].name),
3097                                  "rx_priority%u_%s", prio,
3098                                  rte_i40e_rxq_prio_strings[i].name);
3099                         count++;
3100                 }
3101         }
3102
3103         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3104                 for (prio = 0; prio < 8; prio++) {
3105                         snprintf(xstats_names[count].name,
3106                                  sizeof(xstats_names[count].name),
3107                                  "tx_priority%u_%s", prio,
3108                                  rte_i40e_txq_prio_strings[i].name);
3109                         count++;
3110                 }
3111         }
3112         return count;
3113 }
3114
3115 static int
3116 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3117                     unsigned n)
3118 {
3119         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3120         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3121         unsigned i, count, prio;
3122         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3123
3124         count = i40e_xstats_calc_num();
3125         if (n < count)
3126                 return count;
3127
3128         i40e_read_stats_registers(pf, hw);
3129
3130         if (xstats == NULL)
3131                 return 0;
3132
3133         count = 0;
3134
3135         /* Get stats from i40e_eth_stats struct */
3136         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3137                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3138                         rte_i40e_stats_strings[i].offset);
3139                 xstats[count].id = count;
3140                 count++;
3141         }
3142
3143         /* Get individual stats from the i40e_hw_port struct */
3144         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3145                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3146                         rte_i40e_hw_port_strings[i].offset);
3147                 xstats[count].id = count;
3148                 count++;
3149         }
3150
3151         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3152                 for (prio = 0; prio < 8; prio++) {
3153                         xstats[count].value =
3154                                 *(uint64_t *)(((char *)hw_stats) +
3155                                 rte_i40e_rxq_prio_strings[i].offset +
3156                                 (sizeof(uint64_t) * prio));
3157                         xstats[count].id = count;
3158                         count++;
3159                 }
3160         }
3161
3162         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3163                 for (prio = 0; prio < 8; prio++) {
3164                         xstats[count].value =
3165                                 *(uint64_t *)(((char *)hw_stats) +
3166                                 rte_i40e_txq_prio_strings[i].offset +
3167                                 (sizeof(uint64_t) * prio));
3168                         xstats[count].id = count;
3169                         count++;
3170                 }
3171         }
3172
3173         return count;
3174 }
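
/*
 * App-side sketch of the two-call xstats convention the functions above
 * implement: query the count with a NULL buffer, allocate, then fetch
 * names and values whose ids index the name array. port_id is a
 * placeholder and error handling is trimmed for brevity.
 */
static __rte_unused void
example_dump_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *values;
        int i, n;

        n = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (n <= 0)
                return;
        names = rte_malloc(NULL, sizeof(*names) * n, 0);
        values = rte_malloc(NULL, sizeof(*values) * n, 0);
        if (names != NULL && values != NULL &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, values, n) == n) {
                for (i = 0; i < n; i++)
                        printf("%s: %"PRIu64"\n",
                               names[values[i].id].name, values[i].value);
        }
        rte_free(names);
        rte_free(values);
}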
3175
3176 static int
3177 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3178                                  __rte_unused uint16_t queue_id,
3179                                  __rte_unused uint8_t stat_idx,
3180                                  __rte_unused uint8_t is_rx)
3181 {
3182         PMD_INIT_FUNC_TRACE();
3183
3184         return -ENOSYS;
3185 }
3186
3187 static int
3188 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3189 {
3190         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3191         u32 full_ver;
3192         u8 ver, patch;
3193         u16 build;
3194         int ret;
3195
3196         full_ver = hw->nvm.oem_ver;
3197         ver = (u8)(full_ver >> 24);
3198         build = (u16)((full_ver >> 8) & 0xffff);
3199         patch = (u8)(full_ver & 0xff);
3200
3201         ret = snprintf(fw_version, fw_size,
3202                  "%d.%d%d 0x%08x %d.%d.%d",
3203                  ((hw->nvm.version >> 12) & 0xf),
3204                  ((hw->nvm.version >> 4) & 0xff),
3205                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3206                  ver, build, patch);
3207
3208         ret += 1; /* add the size of '\0' */
3209         if (fw_size < (u32)ret)
3210                 return ret;
3211         else
3212                 return 0;
3213 }
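
/*
 * Worked example of the decoding above (the values are hypothetical):
 * with hw->nvm.oem_ver = 0x06058000, ver = 0x06058000 >> 24 = 6,
 * build = (0x06058000 >> 8) & 0xffff = 0x0580 = 1408 and
 * patch = 0x06058000 & 0xff = 0, giving an OEM part of "6.1408.0".
 * With hw->nvm.version = 0x13F0, the NVM part prints as "1.630"
 * (major 1, minor 63, final nibble 0).
 */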
3214
3215 static void
3216 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3217 {
3218         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3219         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3220         struct i40e_vsi *vsi = pf->main_vsi;
3221         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3222
3223         dev_info->max_rx_queues = vsi->nb_qps;
3224         dev_info->max_tx_queues = vsi->nb_qps;
3225         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3226         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3227         dev_info->max_mac_addrs = vsi->max_macaddrs;
3228         dev_info->max_vfs = pci_dev->max_vfs;
3229         dev_info->rx_queue_offload_capa = 0;
3230         dev_info->rx_offload_capa =
3231                 DEV_RX_OFFLOAD_VLAN_STRIP |
3232                 DEV_RX_OFFLOAD_QINQ_STRIP |
3233                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3234                 DEV_RX_OFFLOAD_UDP_CKSUM |
3235                 DEV_RX_OFFLOAD_TCP_CKSUM |
3236                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3237                 DEV_RX_OFFLOAD_CRC_STRIP |
3238                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3239                 DEV_RX_OFFLOAD_VLAN_FILTER;
3240
3241         dev_info->tx_queue_offload_capa = 0;
3242         dev_info->tx_offload_capa =
3243                 DEV_TX_OFFLOAD_VLAN_INSERT |
3244                 DEV_TX_OFFLOAD_QINQ_INSERT |
3245                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3246                 DEV_TX_OFFLOAD_UDP_CKSUM |
3247                 DEV_TX_OFFLOAD_TCP_CKSUM |
3248                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3249                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3250                 DEV_TX_OFFLOAD_TCP_TSO |
3251                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3252                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3253                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3254                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3255         dev_info->dev_capa =
3256                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3257                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3258
3259         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3260                                                 sizeof(uint32_t);
3261         dev_info->reta_size = pf->hash_lut_size;
3262         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3263
3264         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3265                 .rx_thresh = {
3266                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3267                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3268                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3269                 },
3270                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3271                 .rx_drop_en = 0,
3272                 .offloads = 0,
3273         };
3274
3275         dev_info->default_txconf = (struct rte_eth_txconf) {
3276                 .tx_thresh = {
3277                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3278                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3279                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3280                 },
3281                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3282                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3283                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3284                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3285         };
3286
3287         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3288                 .nb_max = I40E_MAX_RING_DESC,
3289                 .nb_min = I40E_MIN_RING_DESC,
3290                 .nb_align = I40E_ALIGN_RING_DESC,
3291         };
3292
3293         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3294                 .nb_max = I40E_MAX_RING_DESC,
3295                 .nb_min = I40E_MIN_RING_DESC,
3296                 .nb_align = I40E_ALIGN_RING_DESC,
3297                 .nb_seg_max = I40E_TX_MAX_SEG,
3298                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3299         };
3300
3301         if (pf->flags & I40E_FLAG_VMDQ) {
3302                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3303                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3304                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3305                                                 pf->max_nb_vmdq_vsi;
3306                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3307                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3308                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3309         }
3310
3311         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3312                 /* For XL710 */
3313                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3314                 dev_info->default_rxportconf.nb_queues = 2;
3315                 dev_info->default_txportconf.nb_queues = 2;
3316                 if (dev->data->nb_rx_queues == 1)
3317                         dev_info->default_rxportconf.ring_size = 2048;
3318                 else
3319                         dev_info->default_rxportconf.ring_size = 1024;
3320                 if (dev->data->nb_tx_queues == 1)
3321                         dev_info->default_txportconf.ring_size = 1024;
3322                 else
3323                         dev_info->default_txportconf.ring_size = 512;
3324
3325         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3326                 /* For XXV710 */
3327                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3328                 dev_info->default_rxportconf.nb_queues = 1;
3329                 dev_info->default_txportconf.nb_queues = 1;
3330                 dev_info->default_rxportconf.ring_size = 256;
3331                 dev_info->default_txportconf.ring_size = 256;
3332         } else {
3333                 /* For X710 */
3334                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3335                 dev_info->default_rxportconf.nb_queues = 1;
3336                 dev_info->default_txportconf.nb_queues = 1;
3337                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3338                         dev_info->default_rxportconf.ring_size = 512;
3339                         dev_info->default_txportconf.ring_size = 256;
3340                 } else {
3341                         dev_info->default_rxportconf.ring_size = 256;
3342                         dev_info->default_txportconf.ring_size = 256;
3343                 }
3344         }
3345         dev_info->default_rxportconf.burst_size = 32;
3346         dev_info->default_txportconf.burst_size = 32;
3347 }
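
/*
 * App-side sketch (illustrative; port_id is a placeholder): how the
 * capabilities reported above are typically consumed before configuring
 * a port.
 */
static __rte_unused void
example_check_caps(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(port_id, &dev_info);
        if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
                printf("port %u supports rx IPv4 checksum offload\n",
                       port_id);
        printf("max rx queues: %u, reta size: %u\n",
               dev_info.max_rx_queues, dev_info.reta_size);
}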
3348
3349 static int
3350 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3351 {
3352         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3353         struct i40e_vsi *vsi = pf->main_vsi;
3354         PMD_INIT_FUNC_TRACE();
3355
3356         if (on)
3357                 return i40e_vsi_add_vlan(vsi, vlan_id);
3358         else
3359                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3360 }
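
/*
 * Usage sketch: the handler above is reached through the generic ethdev
 * VLAN filter call. The port number and VLAN ID are placeholders.
 */
static __rte_unused int
example_allow_vlan_100(uint16_t port_id)
{
        return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}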
3361
3362 static int
3363 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3364                                 enum rte_vlan_type vlan_type,
3365                                 uint16_t tpid, int qinq)
3366 {
3367         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3368         uint64_t reg_r = 0;
3369         uint64_t reg_w = 0;
3370         uint16_t reg_id = 3;
3371         int ret;
3372
3373         if (qinq) {
3374                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3375                         reg_id = 2;
3376         }
3377
3378         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3379                                           &reg_r, NULL);
3380         if (ret != I40E_SUCCESS) {
3381                 PMD_DRV_LOG(ERR,
3382                            "Failed to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3383                            reg_id);
3384                 return -EIO;
3385         }
3386         PMD_DRV_LOG(DEBUG,
3387                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3388                     reg_id, reg_r);
3389
3390         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3391         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3392         if (reg_r == reg_w) {
3393                 PMD_DRV_LOG(DEBUG, "No need to write");
3394                 return 0;
3395         }
3396
3397         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3398                                            reg_w, NULL);
3399         if (ret != I40E_SUCCESS) {
3400                 PMD_DRV_LOG(ERR,
3401                             "Failed to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3402                             reg_id);
3403                 return -EIO;
3404         }
3405         PMD_DRV_LOG(DEBUG,
3406                     "Global register 0x%08x was changed to value 0x%08x",
3407                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3408
3409         return 0;
3410 }
3411
3412 static int
3413 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3414                    enum rte_vlan_type vlan_type,
3415                    uint16_t tpid)
3416 {
3417         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3418         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3419         int qinq = dev->data->dev_conf.rxmode.offloads &
3420                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3421         int ret = 0;
3422
3423         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3424              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3425             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3426                 PMD_DRV_LOG(ERR,
3427                             "Unsupported vlan type.");
3428                 return -EINVAL;
3429         }
3430
3431         if (pf->support_multi_driver) {
3432                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3433                 return -ENOTSUP;
3434         }
3435
3436         /* The ability to handle 802.1ad frames was added in NVM API 1.7 */
3437         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3438                 if (qinq) {
3439                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3440                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3441                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3442                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3443                 } else {
3444                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3445                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3446                 }
3447                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3448                 if (ret != I40E_SUCCESS) {
3449                         PMD_DRV_LOG(ERR,
3450                                     "Set switch config failed aq_err: %d",
3451                                     hw->aq.asq_last_status);
3452                         ret = -EIO;
3453                 }
3454         } else
3455                 /* If NVM API < 1.7, keep the register setting */
3456                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3457                                                       tpid, qinq);
3458         i40e_global_cfg_warning(I40E_WARNING_TPID);
3459
3460         return ret;
3461 }
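
/*
 * Usage sketch (port_id is a placeholder): setting the outer TPID to
 * 0x88A8 (802.1ad) through the generic ethdev API lands in
 * i40e_vlan_tpid_set() above. Note that ETH_VLAN_TYPE_INNER is only
 * accepted when QinQ (DEV_RX_OFFLOAD_VLAN_EXTEND) is enabled.
 */
static __rte_unused int
example_set_outer_tpid(uint16_t port_id)
{
        return rte_eth_dev_set_vlan_ether_type(port_id,
                                               ETH_VLAN_TYPE_OUTER, 0x88A8);
}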
3462
3463 static int
3464 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3465 {
3466         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3467         struct i40e_vsi *vsi = pf->main_vsi;
3468         struct rte_eth_rxmode *rxmode;
3469
3470         rxmode = &dev->data->dev_conf.rxmode;
3471         if (mask & ETH_VLAN_FILTER_MASK) {
3472                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3473                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3474                 else
3475                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3476         }
3477
3478         if (mask & ETH_VLAN_STRIP_MASK) {
3479                 /* Enable or disable VLAN stripping */
3480                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3481                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3482                 else
3483                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3484         }
3485
3486         if (mask & ETH_VLAN_EXTEND_MASK) {
3487                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
3488                         i40e_vsi_config_double_vlan(vsi, TRUE);
3489                         /* Set global registers with default ethertype. */
3490                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3491                                            ETHER_TYPE_VLAN);
3492                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3493                                            ETHER_TYPE_VLAN);
3494                 } else
3496                         i40e_vsi_config_double_vlan(vsi, FALSE);
3497         }
3498
3499         return 0;
3500 }
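
/*
 * Usage sketch (port_id is a placeholder): enabling VLAN stripping at
 * runtime. The ethdev layer compares the requested ETH_VLAN_*_OFFLOAD
 * flags against the current configuration and calls the handler above
 * with the corresponding ETH_VLAN_*_MASK bits, so a read-modify-write
 * preserves the other VLAN settings.
 */
static __rte_unused int
example_enable_vlan_strip(uint16_t port_id)
{
        int flags = rte_eth_dev_get_vlan_offload(port_id);

        if (flags < 0)
                return flags;
        return rte_eth_dev_set_vlan_offload(port_id,
                                            flags | ETH_VLAN_STRIP_OFFLOAD);
}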
3501
3502 static void
3503 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3504                           __rte_unused uint16_t queue,
3505                           __rte_unused int on)
3506 {
3507         PMD_INIT_FUNC_TRACE();
3508 }
3509
3510 static int
3511 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3512 {
3513         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3514         struct i40e_vsi *vsi = pf->main_vsi;
3515         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3516         struct i40e_vsi_vlan_pvid_info info;
3517
3518         memset(&info, 0, sizeof(info));
3519         info.on = on;
3520         if (info.on)
3521                 info.config.pvid = pvid;
3522         else {
3523                 info.config.reject.tagged =
3524                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3525                 info.config.reject.untagged =
3526                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3527         }
3528
3529         return i40e_vsi_vlan_pvid_set(vsi, &info);
3530 }
3531
3532 static int
3533 i40e_dev_led_on(struct rte_eth_dev *dev)
3534 {
3535         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3536         uint32_t mode = i40e_led_get(hw);
3537
3538         if (mode == 0)
3539                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3540
3541         return 0;
3542 }
3543
3544 static int
3545 i40e_dev_led_off(struct rte_eth_dev *dev)
3546 {
3547         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3548         uint32_t mode = i40e_led_get(hw);
3549
3550         if (mode != 0)
3551                 i40e_led_set(hw, 0, false);
3552
3553         return 0;
3554 }
3555
3556 static int
3557 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3558 {
3559         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3560         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3561
3562         fc_conf->pause_time = pf->fc_conf.pause_time;
3563
3564         /* Read from the registers, in case they were modified by another port */
3565         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3566                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3567         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3568                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3569
3570         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3571         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3572
3573         /* Return the current mode according to the actual setting */
3574         switch (hw->fc.current_mode) {
3575         case I40E_FC_FULL:
3576                 fc_conf->mode = RTE_FC_FULL;
3577                 break;
3578         case I40E_FC_TX_PAUSE:
3579                 fc_conf->mode = RTE_FC_TX_PAUSE;
3580                 break;
3581         case I40E_FC_RX_PAUSE:
3582                 fc_conf->mode = RTE_FC_RX_PAUSE;
3583                 break;
3584         case I40E_FC_NONE:
3585         default:
3586                 fc_conf->mode = RTE_FC_NONE;
3587         }
3588
3589         return 0;
3590 }
3591
3592 static int
3593 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3594 {
3595         uint32_t mflcn_reg, fctrl_reg, reg;
3596         uint32_t max_high_water;
3597         uint8_t i, aq_failure;
3598         int err;
3599         struct i40e_hw *hw;
3600         struct i40e_pf *pf;
3601         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3602                 [RTE_FC_NONE] = I40E_FC_NONE,
3603                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3604                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3605                 [RTE_FC_FULL] = I40E_FC_FULL
3606         };
3607
3608         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
3609
3610         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3611         if ((fc_conf->high_water > max_high_water) ||
3612                         (fc_conf->high_water < fc_conf->low_water)) {
3613                 PMD_INIT_LOG(ERR,
3614                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3615                         max_high_water);
3616                 return -EINVAL;
3617         }
3618
3619         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3620         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3621         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3622
3623         pf->fc_conf.pause_time = fc_conf->pause_time;
3624         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3625         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3626
3627         PMD_INIT_FUNC_TRACE();
3628
3629         /* All the link flow control related enable/disable register
3630          * configuration is handled by the F/W
3631          */
3632         err = i40e_set_fc(hw, &aq_failure, true);
3633         if (err < 0)
3634                 return -ENOSYS;
3635
3636         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3637                 /* Configure flow control refresh threshold,
3638                  * the value for stat_tx_pause_refresh_timer[8]
3639                  * is used for global pause operation.
3640                  */
3641
3642                 I40E_WRITE_REG(hw,
3643                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3644                                pf->fc_conf.pause_time);
3645
3646                 /* Configure the timer value included in transmitted pause
3647                  * frames;
3648                  * the value for stat_tx_pause_quanta[8] is used for global
3649                  * pause operation.
3650                  */
3651                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3652                                pf->fc_conf.pause_time);
3653
3654                 fctrl_reg = I40E_READ_REG(hw,
3655                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3656
3657                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3658                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3659                 else
3660                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3661
3662                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3663                                fctrl_reg);
3664         } else {
3665                 /* Configure pause time (2 TCs per register) */
3666                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3667                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3668                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3669
3670                 /* Configure flow control refresh threshold value */
3671                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3672                                pf->fc_conf.pause_time / 2);
3673
3674                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3675
3676                 /* Set or clear the MFLCN.PMCF and MFLCN.DPF bits
3677                  * depending on configuration.
3678                  */
3679                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3680                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3681                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3682                 } else {
3683                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3684                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3685                 }
3686
3687                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3688         }
3689
3690         if (!pf->support_multi_driver) {
3691                 /* Configure watermarks based on both packet and byte counts */
3692                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3693                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3694                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3695                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3696                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3697                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3698                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3699                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3700                                   << I40E_KILOSHIFT);
3701                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3702                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3703                                    << I40E_KILOSHIFT);
3704                 i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
3705         } else {
3706                 PMD_DRV_LOG(ERR,
3707                             "Watermark configuration is not supported.");
3708         }
3709
3710         I40E_WRITE_FLUSH(hw);
3711
3712         return 0;
3713 }
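
/*
 * App-side sketch of a read-modify-write flow control update; the units
 * match the checks above (watermarks in KB, high_water capped at
 * I40E_RXPBSIZE >> I40E_KILOSHIFT = 968 KB). As a worked example, a
 * 968 KB global high watermark converts to 968 << 10 = 991232 bytes,
 * or 991232 / 128 = 7744 average-sized packets for the packet-based
 * register. port_id is a placeholder.
 */
static __rte_unused int
example_enable_rx_pause(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;
        fc_conf.mode = RTE_FC_RX_PAUSE; /* honor received pause frames */
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}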
3714
3715 static int
3716 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3717                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3718 {
3719         PMD_INIT_FUNC_TRACE();
3720
3721         return -ENOSYS;
3722 }
3723
3724 /* Add a MAC address, and update filters */
3725 static int
3726 i40e_macaddr_add(struct rte_eth_dev *dev,
3727                  struct ether_addr *mac_addr,
3728                  __rte_unused uint32_t index,
3729                  uint32_t pool)
3730 {
3731         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3732         struct i40e_mac_filter_info mac_filter;
3733         struct i40e_vsi *vsi;
3734         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
3735         int ret;
3736
3737         /* If VMDQ not enabled or configured, return */
3738         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3739                           !pf->nb_cfg_vmdq_vsi)) {
3740                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3741                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3742                         pool);
3743                 return -ENOTSUP;
3744         }
3745
3746         if (pool > pf->nb_cfg_vmdq_vsi) {
3747                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3748                                 pool, pf->nb_cfg_vmdq_vsi);
3749                 return -EINVAL;
3750         }
3751
3752         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3753         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3754                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3755         else
3756                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3757
3758         if (pool == 0)
3759                 vsi = pf->main_vsi;
3760         else
3761                 vsi = pf->vmdq[pool - 1].vsi;
3762
3763         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3764         if (ret != I40E_SUCCESS) {
3765                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3766                 return -ENODEV;
3767         }
3768         return 0;
3769 }
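
/* Editor's note: this callback is reached through rte_eth_dev_mac_addr_add().
 * A minimal caller-side sketch (port_id is an assumption; a non-zero pool
 * also requires a VMDq configuration, and pool N maps to VMDq VSI N - 1):
 *
 *	struct ether_addr addr = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *	int rc = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 *	if (rc != 0)
 *		printf("mac_addr_add failed: %d\n", rc);
 */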
3770
3771 /* Remove a MAC address, and update filters */
3772 static void
3773 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3774 {
3775         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3776         struct i40e_vsi *vsi;
3777         struct rte_eth_dev_data *data = dev->data;
3778         struct ether_addr *macaddr;
3779         int ret;
3780         uint32_t i;
3781         uint64_t pool_sel;
3782
3783         macaddr = &(data->mac_addrs[index]);
3784
3785         pool_sel = dev->data->mac_pool_sel[index];
3786
3787         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3788                 if (pool_sel & (1ULL << i)) {
3789                         if (i == 0)
3790                                 vsi = pf->main_vsi;
3791                         else {
3792                                 /* No VMDQ pool enabled or configured */
3793                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3794                                         (i > pf->nb_cfg_vmdq_vsi)) {
3795                                         PMD_DRV_LOG(ERR,
3796                                                 "No VMDQ pool enabled/configured");
3797                                         return;
3798                                 }
3799                                 vsi = pf->vmdq[i - 1].vsi;
3800                         }
3801                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3802
3803                         if (ret) {
3804                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3805                                 return;
3806                         }
3807                 }
3808         }
3809 }
3810
3811 /* Set perfect match or hash match of MAC and VLAN for a VF */
3812 static int
3813 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3814                  struct rte_eth_mac_filter *filter,
3815                  bool add)
3816 {
3817         struct i40e_hw *hw;
3818         struct i40e_mac_filter_info mac_filter;
3819         struct ether_addr old_mac;
3820         struct ether_addr *new_mac;
3821         struct i40e_pf_vf *vf = NULL;
3822         uint16_t vf_id;
3823         int ret;
3824
3825         if (pf == NULL) {
3826                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3827                 return -EINVAL;
3828         }
3829         hw = I40E_PF_TO_HW(pf);
3830
3831         if (filter == NULL) {
3832                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3833                 return -EINVAL;
3834         }
3835
3836         new_mac = &filter->mac_addr;
3837
3838         if (is_zero_ether_addr(new_mac)) {
3839                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3840                 return -EINVAL;
3841         }
3842
3843         vf_id = filter->dst_id;
3844
        if (vf_id >= pf->vf_num || !pf->vfs) {
3846                 PMD_DRV_LOG(ERR, "Invalid argument.");
3847                 return -EINVAL;
3848         }
3849         vf = &pf->vfs[vf_id];
3850
3851         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3852                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3853                 return -EINVAL;
3854         }
3855
3856         if (add) {
3857                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3858                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3859                                 ETHER_ADDR_LEN);
3860                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3861                                  ETHER_ADDR_LEN);
3862
3863                 mac_filter.filter_type = filter->filter_type;
3864                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3865                 if (ret != I40E_SUCCESS) {
3866                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3867                         return -1;
3868                 }
3869                 ether_addr_copy(new_mac, &pf->dev_addr);
3870         } else {
3871                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3872                                 ETHER_ADDR_LEN);
3873                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3874                 if (ret != I40E_SUCCESS) {
3875                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3876                         return -1;
3877                 }
3878
3879                 /* Clear device address as it has been removed */
3880                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3881                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3882         }
3883
3884         return 0;
3885 }
3886
3887 /* MAC filter handle */
3888 static int
3889 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3890                 void *arg)
3891 {
3892         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3893         struct rte_eth_mac_filter *filter;
3894         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3895         int ret = I40E_NOT_SUPPORTED;
3896
3897         filter = (struct rte_eth_mac_filter *)(arg);
3898
3899         switch (filter_op) {
3900         case RTE_ETH_FILTER_NOP:
3901                 ret = I40E_SUCCESS;
3902                 break;
3903         case RTE_ETH_FILTER_ADD:
3904                 i40e_pf_disable_irq0(hw);
3905                 if (filter->is_vf)
3906                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3907                 i40e_pf_enable_irq0(hw);
3908                 break;
3909         case RTE_ETH_FILTER_DELETE:
3910                 i40e_pf_disable_irq0(hw);
3911                 if (filter->is_vf)
3912                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3913                 i40e_pf_enable_irq0(hw);
3914                 break;
3915         default:
3916                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3917                 ret = I40E_ERR_PARAM;
3918                 break;
3919         }
3920
3921         return ret;
3922 }
3923
3924 static int
3925 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3926 {
3927         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3928         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3929         uint32_t reg;
3930         int ret;
3931
3932         if (!lut)
3933                 return -EINVAL;
3934
3935         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3936                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3937                                           lut, lut_size);
3938                 if (ret) {
3939                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3940                         return ret;
3941                 }
3942         } else {
3943                 uint32_t *lut_dw = (uint32_t *)lut;
3944                 uint16_t i, lut_size_dw = lut_size / 4;
3945
3946                 if (vsi->type == I40E_VSI_SRIOV) {
                        for (i = 0; i < lut_size_dw; i++) {
3948                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
3949                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
3950                         }
3951                 } else {
3952                         for (i = 0; i < lut_size_dw; i++)
3953                                 lut_dw[i] = I40E_READ_REG(hw,
3954                                                           I40E_PFQF_HLUT(i));
3955                 }
3956         }
3957
3958         return 0;
3959 }
3960
3961 int
3962 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3963 {
3964         struct i40e_pf *pf;
3965         struct i40e_hw *hw;
3966         int ret;
3967
3968         if (!vsi || !lut)
3969                 return -EINVAL;
3970
3971         pf = I40E_VSI_TO_PF(vsi);
3972         hw = I40E_VSI_TO_HW(vsi);
3973
3974         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3975                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3976                                           lut, lut_size);
3977                 if (ret) {
3978                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3979                         return ret;
3980                 }
3981         } else {
3982                 uint32_t *lut_dw = (uint32_t *)lut;
3983                 uint16_t i, lut_size_dw = lut_size / 4;
3984
3985                 if (vsi->type == I40E_VSI_SRIOV) {
3986                         for (i = 0; i < lut_size_dw; i++)
3987                                 I40E_WRITE_REG(
3988                                         hw,
3989                                         I40E_VFQF_HLUT1(i, vsi->user_param),
3990                                         lut_dw[i]);
3991                 } else {
3992                         for (i = 0; i < lut_size_dw; i++)
3993                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
3994                                                lut_dw[i]);
3995                 }
3996                 I40E_WRITE_FLUSH(hw);
3997         }
3998
3999         return 0;
4000 }
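
/* Editor's note: in both LUT helpers above, the table is one byte per
 * entry, each byte holding a queue index; four entries are packed into
 * every 32-bit HLUT register, hence the lut_size / 4 register accesses.
 * For example, a 512-entry PF LUT is covered by 512 / 4 = 128
 * I40E_PFQF_HLUT registers.
 */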
4001
4002 static int
4003 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4004                          struct rte_eth_rss_reta_entry64 *reta_conf,
4005                          uint16_t reta_size)
4006 {
4007         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4008         uint16_t i, lut_size = pf->hash_lut_size;
4009         uint16_t idx, shift;
4010         uint8_t *lut;
4011         int ret;
4012
4013         if (reta_size != lut_size ||
4014                 reta_size > ETH_RSS_RETA_SIZE_512) {
4015                 PMD_DRV_LOG(ERR,
                        "The configured hash lookup table size (%d) doesn't match the size the hardware supports (%d)",
4017                         reta_size, lut_size);
4018                 return -EINVAL;
4019         }
4020
4021         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4022         if (!lut) {
4023                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4024                 return -ENOMEM;
4025         }
4026         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4027         if (ret)
4028                 goto out;
4029         for (i = 0; i < reta_size; i++) {
4030                 idx = i / RTE_RETA_GROUP_SIZE;
4031                 shift = i % RTE_RETA_GROUP_SIZE;
4032                 if (reta_conf[idx].mask & (1ULL << shift))
4033                         lut[i] = reta_conf[idx].reta[shift];
4034         }
4035         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4036
4037 out:
4038         rte_free(lut);
4039
4040         return ret;
4041 }
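
/* Editor's note: a minimal application-side sketch of driving the update
 * above (port_id, nb_rx_queues and the 512-entry table size are
 * assumptions; the real size comes from rte_eth_dev_info_get()). Entry i
 * lives in group i / RTE_RETA_GROUP_SIZE at bit i % RTE_RETA_GROUP_SIZE,
 * mirroring the idx/shift computation in the function:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 512; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
 */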
4042
4043 static int
4044 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4045                         struct rte_eth_rss_reta_entry64 *reta_conf,
4046                         uint16_t reta_size)
4047 {
4048         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4049         uint16_t i, lut_size = pf->hash_lut_size;
4050         uint16_t idx, shift;
4051         uint8_t *lut;
4052         int ret;
4053
4054         if (reta_size != lut_size ||
4055                 reta_size > ETH_RSS_RETA_SIZE_512) {
4056                 PMD_DRV_LOG(ERR,
                        "The configured hash lookup table size (%d) doesn't match the size the hardware supports (%d)",
4058                         reta_size, lut_size);
4059                 return -EINVAL;
4060         }
4061
4062         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4063         if (!lut) {
4064                 PMD_DRV_LOG(ERR, "No memory can be allocated");
4065                 return -ENOMEM;
4066         }
4067
4068         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4069         if (ret)
4070                 goto out;
4071         for (i = 0; i < reta_size; i++) {
4072                 idx = i / RTE_RETA_GROUP_SIZE;
4073                 shift = i % RTE_RETA_GROUP_SIZE;
4074                 if (reta_conf[idx].mask & (1ULL << shift))
4075                         reta_conf[idx].reta[shift] = lut[i];
4076         }
4077
4078 out:
4079         rte_free(lut);
4080
4081         return ret;
4082 }
4083
4084 /**
4085  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4086  * @hw:   pointer to the HW structure
4087  * @mem:  pointer to mem struct to fill out
4088  * @size: size of memory requested
4089  * @alignment: what to align the allocation to
4090  **/
4091 enum i40e_status_code
4092 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4093                         struct i40e_dma_mem *mem,
4094                         u64 size,
4095                         u32 alignment)
4096 {
4097         const struct rte_memzone *mz = NULL;
4098         char z_name[RTE_MEMZONE_NAMESIZE];
4099
4100         if (!mem)
4101                 return I40E_ERR_PARAM;
4102
4103         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4104         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4105                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4106         if (!mz)
4107                 return I40E_ERR_NO_MEMORY;
4108
4109         mem->size = size;
4110         mem->va = mz->addr;
4111         mem->pa = mz->iova;
4112         mem->zone = (const void *)mz;
4113         PMD_DRV_LOG(DEBUG,
4114                 "memzone %s allocated with physical address: %"PRIu64,
4115                 mz->name, mem->pa);
4116
4117         return I40E_SUCCESS;
4118 }
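
/* Editor's note: every shared-code DMA request above is backed by its own
 * rte_memzone; rte_rand() only serves to make the zone name unique, since
 * rte_memzone_reserve_bounded() fails on duplicate names. The
 * RTE_MEMZONE_IOVA_CONTIG flag requests an IOVA-contiguous zone, which the
 * hardware needs for structures such as admin queue rings.
 */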
4119
4120 /**
4121  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4122  * @hw:   pointer to the HW structure
4123  * @mem:  ptr to mem struct to free
4124  **/
4125 enum i40e_status_code
4126 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4127                     struct i40e_dma_mem *mem)
4128 {
4129         if (!mem)
4130                 return I40E_ERR_PARAM;
4131
4132         PMD_DRV_LOG(DEBUG,
4133                 "memzone %s to be freed with physical address: %"PRIu64,
4134                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4135         rte_memzone_free((const struct rte_memzone *)mem->zone);
4136         mem->zone = NULL;
4137         mem->va = NULL;
4138         mem->pa = (u64)0;
4139
4140         return I40E_SUCCESS;
4141 }
4142
4143 /**
4144  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4145  * @hw:   pointer to the HW structure
4146  * @mem:  pointer to mem struct to fill out
4147  * @size: size of memory requested
4148  **/
4149 enum i40e_status_code
4150 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4151                          struct i40e_virt_mem *mem,
4152                          u32 size)
4153 {
4154         if (!mem)
4155                 return I40E_ERR_PARAM;
4156
4157         mem->size = size;
4158         mem->va = rte_zmalloc("i40e", size, 0);
4159
4160         if (mem->va)
4161                 return I40E_SUCCESS;
4162         else
4163                 return I40E_ERR_NO_MEMORY;
4164 }
4165
4166 /**
4167  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4168  * @hw:   pointer to the HW structure
4169  * @mem:  pointer to mem struct to free
4170  **/
4171 enum i40e_status_code
4172 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4173                      struct i40e_virt_mem *mem)
4174 {
4175         if (!mem)
4176                 return I40E_ERR_PARAM;
4177
4178         rte_free(mem->va);
4179         mem->va = NULL;
4180
4181         return I40E_SUCCESS;
4182 }
4183
4184 void
4185 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4186 {
4187         rte_spinlock_init(&sp->spinlock);
4188 }
4189
4190 void
4191 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4192 {
4193         rte_spinlock_lock(&sp->spinlock);
4194 }
4195
4196 void
4197 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4198 {
4199         rte_spinlock_unlock(&sp->spinlock);
4200 }
4201
4202 void
4203 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4204 {
4205         return;
4206 }
4207
4208 /**
4209  * Get the hardware capabilities, which will be parsed
4210  * and saved into struct i40e_hw.
4211  */
4212 static int
4213 i40e_get_cap(struct i40e_hw *hw)
4214 {
4215         struct i40e_aqc_list_capabilities_element_resp *buf;
4216         uint16_t len, size = 0;
4217         int ret;
4218
        /* Calculate a buffer size large enough to hold the response
         * data temporarily.
         */
4220         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4221                                                 I40E_MAX_CAP_ELE_NUM;
4222         buf = rte_zmalloc("i40e", len, 0);
4223         if (!buf) {
4224                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4225                 return I40E_ERR_NO_MEMORY;
4226         }
4227
4228         /* Get, parse the capabilities and save it to hw */
4229         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4230                         i40e_aqc_opc_list_func_capabilities, NULL);
4231         if (ret != I40E_SUCCESS)
4232                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4233
4234         /* Free the temporary buffer after being used */
4235         rte_free(buf);
4236
4237         return ret;
4238 }
4239
4240 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4241 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
4242
4243 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4244                 const char *value,
4245                 void *opaque)
4246 {
4247         struct i40e_pf *pf;
4248         unsigned long num;
4249         char *end;
4250
4251         pf = (struct i40e_pf *)opaque;
4252         RTE_SET_USED(key);
4253
4254         errno = 0;
4255         num = strtoul(value, &end, 0);
4256         if (errno != 0 || end == value || *end != 0) {
                PMD_DRV_LOG(WARNING, "Wrong VF queue number: %s, "
                            "keeping the current value: %hu", value,
                            pf->vf_nb_qp_max);
4259                 return -(EINVAL);
4260         }
4261
4262         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4263                 pf->vf_nb_qp_max = (uint16_t)num;
4264         else
                /* Return 0 so that a later valid occurrence of the same
                 * argument can still take effect.
                 */
                PMD_DRV_LOG(WARNING, "Wrong VF queue number: %lu, it must be "
                            "a power of 2 and no greater than 16; keeping "
                            "the current value: %hu", num, pf->vf_nb_qp_max);
4269
4270         return 0;
4271 }
4272
4273 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4274 {
4275         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4276         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4277         struct rte_kvargs *kvlist;
4278
4279         /* set default queue number per VF as 4 */
4280         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4281
4282         if (dev->device->devargs == NULL)
4283                 return 0;
4284
4285         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4286         if (kvlist == NULL)
4287                 return -(EINVAL);
4288
4289         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
                PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; "
                            "only the first invalid or the last valid one "
                            "is used",
                            QUEUE_NUM_PER_VF_ARG);
4293
4294         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4295                            i40e_pf_parse_vf_queue_number_handler, pf);
4296
4297         rte_kvargs_free(kvlist);
4298
4299         return 0;
4300 }
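
/* Editor's note: the devarg parsed above is passed on the EAL command
 * line. An illustrative invocation (the PCI address is a placeholder):
 *
 *	testpmd -w 0000:02:00.0,queue-num-per-vf=8 -- -i
 *
 * The value must be a power of two no greater than 16; anything else is
 * ignored with a warning and the default of 4 queues per VF is kept.
 */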
4301
4302 static int
4303 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4304 {
4305         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4306         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4307         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4308         uint16_t qp_count = 0, vsi_count = 0;
4309
4310         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4311                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4312                 return -EINVAL;
4313         }
4314
4315         i40e_pf_config_vf_rxq_number(dev);
4316
4317         /* Add the parameter init for LFC */
4318         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4319         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4320         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4321
4322         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4323         pf->max_num_vsi = hw->func_caps.num_vsis;
4324         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4325         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4326
4327         /* FDir queue/VSI allocation */
4328         pf->fdir_qp_offset = 0;
4329         if (hw->func_caps.fd) {
4330                 pf->flags |= I40E_FLAG_FDIR;
4331                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4332         } else {
4333                 pf->fdir_nb_qps = 0;
4334         }
4335         qp_count += pf->fdir_nb_qps;
4336         vsi_count += 1;
4337
4338         /* LAN queue/VSI allocation */
4339         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4340         if (!hw->func_caps.rss) {
4341                 pf->lan_nb_qps = 1;
4342         } else {
4343                 pf->flags |= I40E_FLAG_RSS;
4344                 if (hw->mac.type == I40E_MAC_X722)
4345                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4346                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4347         }
4348         qp_count += pf->lan_nb_qps;
4349         vsi_count += 1;
4350
4351         /* VF queue/VSI allocation */
4352         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4353         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4354                 pf->flags |= I40E_FLAG_SRIOV;
4355                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4356                 pf->vf_num = pci_dev->max_vfs;
4357                 PMD_DRV_LOG(DEBUG,
4358                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4359                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4360         } else {
4361                 pf->vf_nb_qps = 0;
4362                 pf->vf_num = 0;
4363         }
4364         qp_count += pf->vf_nb_qps * pf->vf_num;
4365         vsi_count += pf->vf_num;
4366
4367         /* VMDq queue/VSI allocation */
4368         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4369         pf->vmdq_nb_qps = 0;
4370         pf->max_nb_vmdq_vsi = 0;
4371         if (hw->func_caps.vmdq) {
4372                 if (qp_count < hw->func_caps.num_tx_qp &&
4373                         vsi_count < hw->func_caps.num_vsis) {
4374                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4375                                 qp_count) / pf->vmdq_nb_qp_max;
4376
4377                         /* Limit the maximum number of VMDq vsi to the maximum
4378                          * ethdev can support
4379                          */
4380                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4381                                 hw->func_caps.num_vsis - vsi_count);
4382                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4383                                 ETH_64_POOLS);
4384                         if (pf->max_nb_vmdq_vsi) {
4385                                 pf->flags |= I40E_FLAG_VMDQ;
4386                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4387                                 PMD_DRV_LOG(DEBUG,
4388                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4389                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4390                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4391                         } else {
4392                                 PMD_DRV_LOG(INFO,
                                        "Not enough queues left for VMDq");
4394                         }
4395                 } else {
4396                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4397                 }
4398         }
4399         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4400         vsi_count += pf->max_nb_vmdq_vsi;
4401
4402         if (hw->func_caps.dcb)
4403                 pf->flags |= I40E_FLAG_DCB;
4404
4405         if (qp_count > hw->func_caps.num_tx_qp) {
4406                 PMD_DRV_LOG(ERR,
4407                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4408                         qp_count, hw->func_caps.num_tx_qp);
4409                 return -EINVAL;
4410         }
4411         if (vsi_count > hw->func_caps.num_vsis) {
4412                 PMD_DRV_LOG(ERR,
4413                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4414                         vsi_count, hw->func_caps.num_vsis);
4415                 return -EINVAL;
4416         }
4417
4418         return 0;
4419 }
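
/* Editor's note: a worked example of the queue/VSI budget above, assuming
 * hypothetical capabilities of 1536 TX queues and 384 VSIs, with FDIR on
 * (1 queue), 64 LAN queues and 32 VFs at 4 queues each:
 *
 *	qp_count  = 1 (FDIR) + 64 (LAN) + 32 * 4 (VFs) = 193
 *	vsi_count = 1 (FDIR) + 1 (LAN)  + 32 (VFs)     = 34
 *
 * The VMDq VSI count is then bounded by the leftover queues
 * ((1536 - 193) / vmdq_nb_qp_max), the leftover VSIs (384 - 34) and
 * ETH_64_POOLS, whichever is smallest.
 */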
4420
4421 static int
4422 i40e_pf_get_switch_config(struct i40e_pf *pf)
4423 {
4424         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4425         struct i40e_aqc_get_switch_config_resp *switch_config;
4426         struct i40e_aqc_switch_config_element_resp *element;
4427         uint16_t start_seid = 0, num_reported;
4428         int ret;
4429
        switch_config = (struct i40e_aqc_get_switch_config_resp *)
                        rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4432         if (!switch_config) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory");
4434                 return -ENOMEM;
4435         }
4436
4437         /* Get the switch configurations */
4438         ret = i40e_aq_get_switch_config(hw, switch_config,
4439                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4440         if (ret != I40E_SUCCESS) {
4441                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4442                 goto fail;
4443         }
4444         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
        if (num_reported != 1) { /* The number should be 1 */
                PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
                ret = I40E_ERR_PARAM;
                goto fail;
4448         }
4449
4450         /* Parse the switch configuration elements */
4451         element = &(switch_config->element[0]);
4452         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4453                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4454                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4455         } else
4456                 PMD_DRV_LOG(INFO, "Unknown element type");
4457
4458 fail:
4459         rte_free(switch_config);
4460
4461         return ret;
4462 }
4463
4464 static int
4465 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4466                         uint32_t num)
4467 {
4468         struct pool_entry *entry;
4469
4470         if (pool == NULL || num == 0)
4471                 return -EINVAL;
4472
4473         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4474         if (entry == NULL) {
4475                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4476                 return -ENOMEM;
4477         }
4478
4479         /* queue heap initialize */
4480         pool->num_free = num;
4481         pool->num_alloc = 0;
4482         pool->base = base;
4483         LIST_INIT(&pool->alloc_list);
4484         LIST_INIT(&pool->free_list);
4485
        /* Initialize the element */
4487         entry->base = 0;
4488         entry->len = num;
4489
4490         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4491         return 0;
4492 }
4493
4494 static void
4495 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4496 {
4497         struct pool_entry *entry, *next_entry;
4498
4499         if (pool == NULL)
4500                 return;
4501
4502         for (entry = LIST_FIRST(&pool->alloc_list);
4503                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4504                         entry = next_entry) {
4505                 LIST_REMOVE(entry, next);
4506                 rte_free(entry);
4507         }
4508
4509         for (entry = LIST_FIRST(&pool->free_list);
4510                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4511                         entry = next_entry) {
4512                 LIST_REMOVE(entry, next);
4513                 rte_free(entry);
4514         }
4515
4516         pool->num_free = 0;
4517         pool->num_alloc = 0;
4518         pool->base = 0;
4519         LIST_INIT(&pool->alloc_list);
4520         LIST_INIT(&pool->free_list);
4521 }
4522
4523 static int
4524 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4525                        uint32_t base)
4526 {
        struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
        uint32_t pool_offset;
        uint32_t len;
        int insert;
4530
4531         if (pool == NULL) {
4532                 PMD_DRV_LOG(ERR, "Invalid parameter");
4533                 return -EINVAL;
4534         }
4535
4536         pool_offset = base - pool->base;
4537         /* Lookup in alloc list */
4538         LIST_FOREACH(entry, &pool->alloc_list, next) {
4539                 if (entry->base == pool_offset) {
4540                         valid_entry = entry;
4541                         LIST_REMOVE(entry, next);
4542                         break;
4543                 }
4544         }
4545
        /* Not found, return */
4547         if (valid_entry == NULL) {
4548                 PMD_DRV_LOG(ERR, "Failed to find entry");
4549                 return -EINVAL;
4550         }
4551
        /**
         * Found it; move it to the free list and try to merge.
         * To make merging easier, the free list is always kept sorted by base.
         * Find the adjacent prev and next entries.
         */
4557         prev = next = NULL;
4558         LIST_FOREACH(entry, &pool->free_list, next) {
4559                 if (entry->base > valid_entry->base) {
4560                         next = entry;
4561                         break;
4562                 }
4563                 prev = entry;
4564         }
4565
        insert = 0;
        /* Remember the freed length: the entry itself may be merged into a
         * neighbour and freed below, but the accounting at the end of the
         * function must still use the original length.
         */
        len = valid_entry->len;

        /* Try to merge with the next one */
        if (next != NULL) {
                if (valid_entry->base + valid_entry->len == next->base) {
                        next->base = valid_entry->base;
                        next->len += valid_entry->len;
                        rte_free(valid_entry);
                        valid_entry = next;
                        insert = 1;
                }
        }

        if (prev != NULL) {
                /* Merge with the previous one */
                if (prev->base + prev->len == valid_entry->base) {
                        prev->len += valid_entry->len;
                        /* If it already merged with the next one, remove
                         * that node as well; valid_entry points to it.
                         */
                        if (insert == 1) {
                                LIST_REMOVE(valid_entry, next);
                                rte_free(valid_entry);
                                valid_entry = NULL;
                        } else {
                                rte_free(valid_entry);
                                valid_entry = NULL;
                                insert = 1;
                        }
                }
        }
4593
        /* Did not find any entry to merge; insert it */
4595         if (insert == 0) {
4596                 if (prev != NULL)
4597                         LIST_INSERT_AFTER(prev, valid_entry, next);
4598                 else if (next != NULL)
4599                         LIST_INSERT_BEFORE(next, valid_entry, next);
4600                 else /* It's empty list, insert to head */
4601                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4602         }
4603
        pool->num_free += len;
        pool->num_alloc -= len;
4606
4607         return 0;
4608 }
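
/* Editor's note (illustrative walk-through, not driver code): freeing the
 * block [8, len 8) into a free list holding [0, len 8) and [16, len 8)
 * first merges forward, turning the next entry into [8, len 16), and then
 * merges backward, growing prev to [0, len 24) and dropping the merged
 * node. The counters are adjusted by the originally freed length (8),
 * which is why it is saved in 'len' before any merge can free the entry.
 */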
4609
4610 static int
4611 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4612                        uint16_t num)
4613 {
4614         struct pool_entry *entry, *valid_entry;
4615
4616         if (pool == NULL || num == 0) {
4617                 PMD_DRV_LOG(ERR, "Invalid parameter");
4618                 return -EINVAL;
4619         }
4620
4621         if (pool->num_free < num) {
4622                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4623                             num, pool->num_free);
4624                 return -ENOMEM;
4625         }
4626
4627         valid_entry = NULL;
        /* Look up the free list and find the best-fit entry */
4629         LIST_FOREACH(entry, &pool->free_list, next) {
4630                 if (entry->len >= num) {
4631                         /* Find best one */
4632                         if (entry->len == num) {
4633                                 valid_entry = entry;
4634                                 break;
4635                         }
4636                         if (valid_entry == NULL || valid_entry->len > entry->len)
4637                                 valid_entry = entry;
4638                 }
4639         }
4640
        /* Did not find an entry satisfying the request; return */
4642         if (valid_entry == NULL) {
4643                 PMD_DRV_LOG(ERR, "No valid entry found");
4644                 return -ENOMEM;
4645         }
        /**
         * The entry has exactly the requested number of queues;
         * remove it from the free list.
         */
4650         if (valid_entry->len == num) {
4651                 LIST_REMOVE(valid_entry, next);
4652         } else {
                /**
                 * The entry has more queues than requested; create a
                 * new entry for the alloc list and shrink the base and
                 * length of the remaining free-list entry.
                 */
4658                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4659                 if (entry == NULL) {
4660                         PMD_DRV_LOG(ERR,
4661                                 "Failed to allocate memory for resource pool");
4662                         return -ENOMEM;
4663                 }
4664                 entry->base = valid_entry->base;
4665                 entry->len = num;
4666                 valid_entry->base += num;
4667                 valid_entry->len -= num;
4668                 valid_entry = entry;
4669         }
4670
4671         /* Insert it into alloc list, not sorted */
4672         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4673
4674         pool->num_free -= valid_entry->len;
4675         pool->num_alloc += valid_entry->len;
4676
4677         return valid_entry->base + pool->base;
4678 }
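
/* Editor's note: the allocator above is best-fit. For example, with a free
 * list of [0, len 4) and [8, len 16), a request for 4 queues consumes the
 * exact-fit [0, len 4) entry, while a request for 6 queues splits
 * [8, len 16) into an allocated [8, len 6) and a remaining free
 * [14, len 10). The return value is the absolute base, i.e. the entry
 * base plus pool->base.
 */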
4679
4680 /**
4681  * bitmap_is_subset - Check whether src2 is subset of src1
4682  **/
4683 static inline int
4684 bitmap_is_subset(uint8_t src1, uint8_t src2)
4685 {
4686         return !((src1 ^ src2) & src2);
4687 }
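
/* Editor's note: bitmap_is_subset() returns non-zero when every bit set in
 * src2 is also set in src1. E.g. src1 = 0x0f, src2 = 0x05 gives
 * (src1 ^ src2) & src2 = 0x0a & 0x05 = 0, so src2 is a subset; with
 * src2 = 0x15, bit 4 is missing from src1, the masked XOR is 0x10, and
 * the function returns 0.
 */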
4688
4689 static enum i40e_status_code
4690 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4691 {
4692         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4693
4694         /* If DCB is not supported, only default TC is supported */
4695         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4696                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4697                 return I40E_NOT_SUPPORTED;
4698         }
4699
4700         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4701                 PMD_DRV_LOG(ERR,
                        "Requested TC map 0x%x is not within the HW-supported TC map 0x%x",
                        enabled_tcmap, hw->func_caps.enabled_tcmap);
4704                 return I40E_NOT_SUPPORTED;
4705         }
4706         return I40E_SUCCESS;
4707 }
4708
4709 int
4710 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4711                                 struct i40e_vsi_vlan_pvid_info *info)
4712 {
4713         struct i40e_hw *hw;
4714         struct i40e_vsi_context ctxt;
4715         uint8_t vlan_flags = 0;
4716         int ret;
4717
4718         if (vsi == NULL || info == NULL) {
4719                 PMD_DRV_LOG(ERR, "invalid parameters");
4720                 return I40E_ERR_PARAM;
4721         }
4722
4723         if (info->on) {
4724                 vsi->info.pvid = info->config.pvid;
4725                 /**
4726                  * If insert pvid is enabled, only tagged pkts are
4727                  * allowed to be sent out.
4728                  */
4729                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4730                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4731         } else {
4732                 vsi->info.pvid = 0;
4733                 if (info->config.reject.tagged == 0)
4734                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4735
4736                 if (info->config.reject.untagged == 0)
4737                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4738         }
4739         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4740                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4741         vsi->info.port_vlan_flags |= vlan_flags;
4742         vsi->info.valid_sections =
4743                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4744         memset(&ctxt, 0, sizeof(ctxt));
4745         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4746         ctxt.seid = vsi->seid;
4747
4748         hw = I40E_VSI_TO_HW(vsi);
4749         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4750         if (ret != I40E_SUCCESS)
4751                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4752
4753         return ret;
4754 }
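
/* Editor's note: a minimal sketch of enabling PVID insertion through the
 * helper above (vsi is assumed to be a valid VSI pointer; the struct
 * layout follows i40e_ethdev.h):
 *
 *	struct i40e_vsi_vlan_pvid_info info = {
 *		.on = 1,
 *		.config.pvid = 100,
 *	};
 *	if (i40e_vsi_vlan_pvid_set(vsi, &info) != I40E_SUCCESS)
 *		PMD_DRV_LOG(ERR, "Failed to set PVID 100");
 *
 * With .on = 0, config.reject.tagged/untagged instead select which
 * traffic the VSI keeps accepting.
 */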
4755
4756 static int
4757 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4758 {
4759         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4760         int i, ret;
4761         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4762
4763         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4764         if (ret != I40E_SUCCESS)
4765                 return ret;
4766
4767         if (!vsi->seid) {
4768                 PMD_DRV_LOG(ERR, "seid not valid");
4769                 return -EINVAL;
4770         }
4771
4772         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4773         tc_bw_data.tc_valid_bits = enabled_tcmap;
4774         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4775                 tc_bw_data.tc_bw_credits[i] =
4776                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4777
4778         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4779         if (ret != I40E_SUCCESS) {
4780                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4781                 return ret;
4782         }
4783
4784         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4785                                         sizeof(vsi->info.qs_handle));
4786         return I40E_SUCCESS;
4787 }
4788
4789 static enum i40e_status_code
4790 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4791                                  struct i40e_aqc_vsi_properties_data *info,
4792                                  uint8_t enabled_tcmap)
4793 {
4794         enum i40e_status_code ret;
4795         int i, total_tc = 0;
4796         uint16_t qpnum_per_tc, bsf, qp_idx;
4797
4798         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4799         if (ret != I40E_SUCCESS)
4800                 return ret;
4801
4802         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4803                 if (enabled_tcmap & (1 << i))
4804                         total_tc++;
4805         if (total_tc == 0)
4806                 total_tc = 1;
4807         vsi->enabled_tc = enabled_tcmap;
4808
4809         /* Number of queues per enabled TC */
4810         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4811         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4812         bsf = rte_bsf32(qpnum_per_tc);
4813
4814         /* Adjust the queue number to actual queues that can be applied */
4815         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4816                 vsi->nb_qps = qpnum_per_tc * total_tc;
4817
        /**
         * Configure the TC and queue mapping parameters. For each enabled
         * TC, allocate qpnum_per_tc queues to it; disabled TCs are served
         * by the default queue.
         */
4823         qp_idx = 0;
4824         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4825                 if (vsi->enabled_tc & (1 << i)) {
4826                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4827                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4828                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4829                         qp_idx += qpnum_per_tc;
4830                 } else
4831                         info->tc_mapping[i] = 0;
4832         }
4833
4834         /* Associate queue number with VSI */
4835         if (vsi->type == I40E_VSI_SRIOV) {
4836                 info->mapping_flags |=
4837                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4838                 for (i = 0; i < vsi->nb_qps; i++)
4839                         info->queue_mapping[i] =
4840                                 rte_cpu_to_le_16(vsi->base_queue + i);
4841         } else {
4842                 info->mapping_flags |=
4843                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4844                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4845         }
4846         info->valid_sections |=
4847                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4848
4849         return I40E_SUCCESS;
4850 }
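
/* Editor's note: a worked example of the tc_mapping encoding above, under
 * the assumption of 16 queues and enabled_tcmap = 0x3 (TC0 and TC1):
 * total_tc = 2, qpnum_per_tc = 8 and bsf = rte_bsf32(8) = 3. TC0 is then
 * encoded as (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) and TC1 as (8 << OFFSET) |
 * (3 << NUMBER): each TC starts at its queue offset and spans
 * 2^bsf = 8 queues.
 */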
4851
4852 static int
4853 i40e_veb_release(struct i40e_veb *veb)
4854 {
4855         struct i40e_vsi *vsi;
4856         struct i40e_hw *hw;
4857
4858         if (veb == NULL)
4859                 return -EINVAL;
4860
4861         if (!TAILQ_EMPTY(&veb->head)) {
4862                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4863                 return -EACCES;
4864         }
4865         /* associate_vsi field is NULL for floating VEB */
4866         if (veb->associate_vsi != NULL) {
4867                 vsi = veb->associate_vsi;
4868                 hw = I40E_VSI_TO_HW(vsi);
4869
4870                 vsi->uplink_seid = veb->uplink_seid;
4871                 vsi->veb = NULL;
4872         } else {
4873                 veb->associate_pf->main_vsi->floating_veb = NULL;
4874                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4875         }
4876
4877         i40e_aq_delete_element(hw, veb->seid, NULL);
4878         rte_free(veb);
4879         return I40E_SUCCESS;
4880 }
4881
4882 /* Setup a veb */
4883 static struct i40e_veb *
4884 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4885 {
4886         struct i40e_veb *veb;
4887         int ret;
4888         struct i40e_hw *hw;
4889
4890         if (pf == NULL) {
4891                 PMD_DRV_LOG(ERR,
                            "VEB setup failed, associated PF shouldn't be NULL");
4893                 return NULL;
4894         }
4895         hw = I40E_PF_TO_HW(pf);
4896
4897         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4898         if (!veb) {
4899                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4900                 goto fail;
4901         }
4902
4903         veb->associate_vsi = vsi;
4904         veb->associate_pf = pf;
4905         TAILQ_INIT(&veb->head);
4906         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4907
4908         /* create floating veb if vsi is NULL */
4909         if (vsi != NULL) {
4910                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4911                                       I40E_DEFAULT_TCMAP, false,
4912                                       &veb->seid, false, NULL);
4913         } else {
4914                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4915                                       true, &veb->seid, false, NULL);
4916         }
4917
4918         if (ret != I40E_SUCCESS) {
4919                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4920                             hw->aq.asq_last_status);
4921                 goto fail;
4922         }
4923         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4924
4925         /* get statistics index */
4926         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4927                                 &veb->stats_idx, NULL, NULL, NULL);
4928         if (ret != I40E_SUCCESS) {
4929                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4930                             hw->aq.asq_last_status);
4931                 goto fail;
4932         }
4933         /* Get VEB bandwidth, to be implemented */
4934         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4935         if (vsi)
4936                 vsi->uplink_seid = veb->seid;
4937
4938         return veb;
4939 fail:
4940         rte_free(veb);
4941         return NULL;
4942 }
4943
4944 int
4945 i40e_vsi_release(struct i40e_vsi *vsi)
4946 {
4947         struct i40e_pf *pf;
4948         struct i40e_hw *hw;
4949         struct i40e_vsi_list *vsi_list;
4950         void *temp;
4951         int ret;
4952         struct i40e_mac_filter *f;
4953         uint16_t user_param;
4954
4955         if (!vsi)
4956                 return I40E_SUCCESS;
4957
4958         if (!vsi->adapter)
4959                 return -EFAULT;
4960
4961         user_param = vsi->user_param;
4962
4963         pf = I40E_VSI_TO_PF(vsi);
4964         hw = I40E_VSI_TO_HW(vsi);
4965
        /* The VSI has children attached; release the children first */
4967         if (vsi->veb) {
4968                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4969                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4970                                 return -1;
4971                 }
4972                 i40e_veb_release(vsi->veb);
4973         }
4974
4975         if (vsi->floating_veb) {
4976                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4977                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4978                                 return -1;
4979                 }
4980         }
4981
4982         /* Remove all macvlan filters of the VSI */
4983         i40e_vsi_remove_all_macvlan_filter(vsi);
4984         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4985                 rte_free(f);
4986
4987         if (vsi->type != I40E_VSI_MAIN &&
4988             ((vsi->type != I40E_VSI_SRIOV) ||
4989             !pf->floating_veb_list[user_param])) {
4990                 /* Remove vsi from parent's sibling list */
4991                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4992                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4993                         return I40E_ERR_PARAM;
4994                 }
4995                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4996                                 &vsi->sib_vsi_list, list);
4997
4998                 /* Remove all switch element of the VSI */
4999                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5000                 if (ret != I40E_SUCCESS)
5001                         PMD_DRV_LOG(ERR, "Failed to delete element");
5002         }
5003
5004         if ((vsi->type == I40E_VSI_SRIOV) &&
5005             pf->floating_veb_list[user_param]) {
5006                 /* Remove vsi from parent's sibling list */
5007                 if (vsi->parent_vsi == NULL ||
5008                     vsi->parent_vsi->floating_veb == NULL) {
5009                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5010                         return I40E_ERR_PARAM;
5011                 }
5012                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5013                              &vsi->sib_vsi_list, list);
5014
5015                 /* Remove all switch element of the VSI */
5016                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5017                 if (ret != I40E_SUCCESS)
5018                         PMD_DRV_LOG(ERR, "Failed to delete element");
5019         }
5020
5021         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5022
5023         if (vsi->type != I40E_VSI_SRIOV)
5024                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5025         rte_free(vsi);
5026
5027         return I40E_SUCCESS;
5028 }
5029
5030 static int
5031 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5032 {
5033         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5034         struct i40e_aqc_remove_macvlan_element_data def_filter;
5035         struct i40e_mac_filter_info filter;
5036         int ret;
5037
5038         if (vsi->type != I40E_VSI_MAIN)
5039                 return I40E_ERR_CONFIG;
5040         memset(&def_filter, 0, sizeof(def_filter));
5041         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5042                                         ETH_ADDR_LEN);
5043         def_filter.vlan_tag = 0;
5044         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5045                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5046         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5047         if (ret != I40E_SUCCESS) {
5048                 struct i40e_mac_filter *f;
5049                 struct ether_addr *mac;
5050
5051                 PMD_DRV_LOG(DEBUG,
5052                             "Cannot remove the default macvlan filter");
                /* Need to add the permanent MAC into the MAC list instead */
5054                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5055                 if (f == NULL) {
5056                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5057                         return I40E_ERR_NO_MEMORY;
5058                 }
5059                 mac = &f->mac_info.mac_addr;
5060                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5061                                 ETH_ADDR_LEN);
5062                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5063                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5064                 vsi->mac_num++;
5065
5066                 return ret;
5067         }
5068         rte_memcpy(&filter.mac_addr,
5069                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5070         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5071         return i40e_vsi_add_mac(vsi, &filter);
5072 }
5073
5074 /*
5075  * i40e_vsi_get_bw_config - Query VSI BW Information
5076  * @vsi: the VSI to be queried
5077  *
5078  * Returns 0 on success, negative value on failure
5079  */
5080 static enum i40e_status_code
5081 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5082 {
5083         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5084         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5085         struct i40e_hw *hw = &vsi->adapter->hw;
5086         i40e_status ret;
5087         int i;
5088         uint32_t bw_max;
5089
5090         memset(&bw_config, 0, sizeof(bw_config));
5091         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5092         if (ret != I40E_SUCCESS) {
5093                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5094                             hw->aq.asq_last_status);
5095                 return ret;
5096         }
5097
5098         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5099         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5100                                         &ets_sla_config, NULL);
5101         if (ret != I40E_SUCCESS) {
5102                 PMD_DRV_LOG(ERR,
                        "VSI failed to get TC bandwidth configuration %u",
5104                         hw->aq.asq_last_status);
5105                 return ret;
5106         }
5107
5108         /* store and print out BW info */
5109         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5110         vsi->bw_info.bw_max = bw_config.max_bw;
5111         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5112         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5113         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5114                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5115                      I40E_16_BIT_WIDTH);
5116         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5117                 vsi->bw_info.bw_ets_share_credits[i] =
5118                                 ets_sla_config.share_credits[i];
5119                 vsi->bw_info.bw_ets_credits[i] =
5120                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
                /* 4 bits per TC; the 4th bit of each nibble is reserved */
5122                 vsi->bw_info.bw_ets_max[i] =
5123                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5124                                   RTE_LEN2MASK(3, uint8_t));
5125                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5126                             vsi->bw_info.bw_ets_share_credits[i]);
5127                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5128                             vsi->bw_info.bw_ets_credits[i]);
5129                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5130                             vsi->bw_info.bw_ets_max[i]);
5131         }
5132
5133         return I40E_SUCCESS;
5134 }
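
/* Editor's note: each TC's max-credit value above is a 4-bit nibble of
 * bw_max with the top bit of the nibble reserved, hence the
 * (bw_max >> (i * 4)) & 0x7 extraction. E.g. bw_max = 0x53 yields
 * bw_ets_max[0] = 3 and bw_ets_max[1] = 5.
 */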
5135
5136 /* i40e_enable_pf_lb
5137  * @pf: pointer to the pf structure
5138  *
5139  * allow loopback on pf
5140  */
5141 static inline void
5142 i40e_enable_pf_lb(struct i40e_pf *pf)
5143 {
5144         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5145         struct i40e_vsi_context ctxt;
5146         int ret;
5147
5148         /* Use the FW API if FW >= v5.0 */
5149         if (hw->aq.fw_maj_ver < 5) {
5150                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5151                 return;
5152         }
5153
5154         memset(&ctxt, 0, sizeof(ctxt));
5155         ctxt.seid = pf->main_vsi_seid;
5156         ctxt.pf_num = hw->pf_id;
5157         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5158         if (ret) {
5159                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5160                             ret, hw->aq.asq_last_status);
5161                 return;
5162         }
5163         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5164         ctxt.info.valid_sections =
5165                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5166         ctxt.info.switch_id |=
5167                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5168
5169         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5170         if (ret)
5171                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5172                             hw->aq.asq_last_status);
5173 }
5174
5175 /* Setup a VSI */
5176 struct i40e_vsi *
5177 i40e_vsi_setup(struct i40e_pf *pf,
5178                enum i40e_vsi_type type,
5179                struct i40e_vsi *uplink_vsi,
5180                uint16_t user_param)
5181 {
5182         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5183         struct i40e_vsi *vsi;
5184         struct i40e_mac_filter_info filter;
5185         int ret;
5186         struct i40e_vsi_context ctxt;
5187         struct ether_addr broadcast =
5188                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5189
5190         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5191             uplink_vsi == NULL) {
5192                 PMD_DRV_LOG(ERR,
5193                         "VSI setup failed, uplink VSI shouldn't be NULL");
5194                 return NULL;
5195         }
5196
5197         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5198                 PMD_DRV_LOG(ERR,
5199                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5200                 return NULL;
5201         }
5202
5203         /* Two situations:
5204          * 1. type is not MAIN and the uplink VSI is not NULL:
5205          *    if the uplink VSI has no VEB yet, create one first in its veb field
5206          * 2. type is SRIOV and the uplink is NULL:
5207          *    if there is no floating VEB yet, create one in the floating_veb field
5208          */
5209
5210         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5211             uplink_vsi->veb == NULL) {
5212                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5213
5214                 if (uplink_vsi->veb == NULL) {
5215                         PMD_DRV_LOG(ERR, "VEB setup failed");
5216                         return NULL;
5217                 }
5218                 /* Set ALLOW_LOOPBACK on the PF when the VEB is created */
5219                 i40e_enable_pf_lb(pf);
5220         }
5221
5222         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5223             pf->main_vsi->floating_veb == NULL) {
5224                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5225
5226                 if (pf->main_vsi->floating_veb == NULL) {
5227                         PMD_DRV_LOG(ERR, "VEB setup failed");
5228                         return NULL;
5229                 }
5230         }
5231
5232         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5233         if (!vsi) {
5234                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5235                 return NULL;
5236         }
5237         TAILQ_INIT(&vsi->mac_list);
5238         vsi->type = type;
5239         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5240         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5241         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5242         vsi->user_param = user_param;
5243         vsi->vlan_anti_spoof_on = 0;
5244         vsi->vlan_filter_on = 0;
5245         /* Allocate queues */
5246         switch (vsi->type) {
5247         case I40E_VSI_MAIN:
5248                 vsi->nb_qps = pf->lan_nb_qps;
5249                 break;
5250         case I40E_VSI_SRIOV:
5251                 vsi->nb_qps = pf->vf_nb_qps;
5252                 break;
5253         case I40E_VSI_VMDQ2:
5254                 vsi->nb_qps = pf->vmdq_nb_qps;
5255                 break;
5256         case I40E_VSI_FDIR:
5257                 vsi->nb_qps = pf->fdir_nb_qps;
5258                 break;
5259         default:
5260                 goto fail_mem;
5261         }
5262         /*
5263          * The filter status descriptor is reported on rx queue 0,
5264          * while the tx queue for fdir filter programming has no such
5265          * constraint and could be any queue.
5266          * To keep it simple, make the FDIR VSI use queue pair 0.
5267          * To guarantee it gets queue pair 0, its queue allocation
5268          * must be done before this function is called.
5269          */
5270         if (type != I40E_VSI_FDIR) {
5271                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5272                 if (ret < 0) {
5273                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5274                                     vsi->seid, ret);
5275                         goto fail_mem;
5276                 }
5277                 vsi->base_queue = ret;
5278         } else
5279                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5280
5281         /* VF has MSIX interrupt in VF range, don't allocate here */
5282         if (type == I40E_VSI_MAIN) {
5283                 if (pf->support_multi_driver) {
5284                         /* If multi-driver support is on, use INT0 instead of
5285                          * allocating from the MSIX pool. The pool starts from
5286                          * INT1, so it is safe to just set msix_intr to 0 and
5287                          * nb_msix to 1 without calling i40e_res_pool_alloc.
5288                          */
5289                         vsi->msix_intr = 0;
5290                         vsi->nb_msix = 1;
5291                 } else {
5292                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5293                                                   RTE_MIN(vsi->nb_qps,
5294                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5295                         if (ret < 0) {
5296                                 PMD_DRV_LOG(ERR,
5297                                             "VSI MAIN %d get heap failed %d",
5298                                             vsi->seid, ret);
5299                                 goto fail_queue_alloc;
5300                         }
5301                         vsi->msix_intr = ret;
5302                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5303                                                RTE_MAX_RXTX_INTR_VEC_ID);
5304                 }
5305         } else if (type != I40E_VSI_SRIOV) {
5306                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5307                 if (ret < 0) {
5308                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5309                         goto fail_queue_alloc;
5310                 }
5311                 vsi->msix_intr = ret;
5312                 vsi->nb_msix = 1;
5313         } else {
5314                 vsi->msix_intr = 0;
5315                 vsi->nb_msix = 0;
5316         }
5317
5318         /* Add VSI */
5319         if (type == I40E_VSI_MAIN) {
5320                 /* For main VSI, no need to add since it's default one */
5321                 vsi->uplink_seid = pf->mac_seid;
5322                 vsi->seid = pf->main_vsi_seid;
5323                 /* Bind queues with specific MSIX interrupt */
5324                 /**
5325                  * At least 2 interrupts are needed: one for the misc cause,
5326                  * which is enabled from the OS side, and another for binding
5327                  * queues to interrupts from the device side only.
5328                  */
5329
5330                 /* Get default VSI parameters from hardware */
5331                 memset(&ctxt, 0, sizeof(ctxt));
5332                 ctxt.seid = vsi->seid;
5333                 ctxt.pf_num = hw->pf_id;
5334                 ctxt.uplink_seid = vsi->uplink_seid;
5335                 ctxt.vf_num = 0;
5336                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5337                 if (ret != I40E_SUCCESS) {
5338                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5339                         goto fail_msix_alloc;
5340                 }
5341                 rte_memcpy(&vsi->info, &ctxt.info,
5342                         sizeof(struct i40e_aqc_vsi_properties_data));
5343                 vsi->vsi_id = ctxt.vsi_number;
5344                 vsi->info.valid_sections = 0;
5345
5346                 /* Configure tc, enabled TC0 only */
5347                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5348                         I40E_SUCCESS) {
5349                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5350                         goto fail_msix_alloc;
5351                 }
5352
5353                 /* TC, queue mapping */
5354                 memset(&ctxt, 0, sizeof(ctxt));
5355                 vsi->info.valid_sections |=
5356                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5357                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5358                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5359                 rte_memcpy(&ctxt.info, &vsi->info,
5360                         sizeof(struct i40e_aqc_vsi_properties_data));
5361                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5362                                                 I40E_DEFAULT_TCMAP);
5363                 if (ret != I40E_SUCCESS) {
5364                         PMD_DRV_LOG(ERR,
5365                                 "Failed to configure TC queue mapping");
5366                         goto fail_msix_alloc;
5367                 }
5368                 ctxt.seid = vsi->seid;
5369                 ctxt.pf_num = hw->pf_id;
5370                 ctxt.uplink_seid = vsi->uplink_seid;
5371                 ctxt.vf_num = 0;
5372
5373                 /* Update VSI parameters */
5374                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5375                 if (ret != I40E_SUCCESS) {
5376                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5377                         goto fail_msix_alloc;
5378                 }
5379
5380                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5381                                                 sizeof(vsi->info.tc_mapping));
5382                 rte_memcpy(&vsi->info.queue_mapping,
5383                                 &ctxt.info.queue_mapping,
5384                         sizeof(vsi->info.queue_mapping));
5385                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5386                 vsi->info.valid_sections = 0;
5387
5388                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5389                                 ETH_ADDR_LEN);
5390
5391                 /**
5392                  * Updating the default filter settings is necessary to
5393                  * prevent reception of tagged packets.
5394                  * Some old firmware configurations load a default macvlan
5395                  * filter which accepts both tagged and untagged packets.
5396                  * The update replaces it with a normal filter if needed.
5397                  * For NVM 4.2.2 or later, the update is no longer needed.
5398                  * Firmware with a correct configuration loads the default
5399                  * macvlan filter, which is expected and cannot be removed.
5400                  */
5401                 i40e_update_default_filter_setting(vsi);
5402                 i40e_config_qinq(hw, vsi);
5403         } else if (type == I40E_VSI_SRIOV) {
5404                 memset(&ctxt, 0, sizeof(ctxt));
5405                 /**
5406                  * For other VSIs, the uplink_seid equals the uplink VSI's
5407                  * uplink_seid since they share the same VEB.
5408                  */
5409                 if (uplink_vsi == NULL)
5410                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5411                 else
5412                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5413                 ctxt.pf_num = hw->pf_id;
5414                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5415                 ctxt.uplink_seid = vsi->uplink_seid;
5416                 ctxt.connection_type = 0x1;
5417                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5418
5419                 /* Use the VEB configuration if FW >= v5.0 */
5420                 if (hw->aq.fw_maj_ver >= 5) {
5421                         /* Configure switch ID */
5422                         ctxt.info.valid_sections |=
5423                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5424                         ctxt.info.switch_id =
5425                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5426                 }
5427
5428                 /* Configure port/vlan */
5429                 ctxt.info.valid_sections |=
5430                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5431                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5432                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5433                                                 hw->func_caps.enabled_tcmap);
5434                 if (ret != I40E_SUCCESS) {
5435                         PMD_DRV_LOG(ERR,
5436                                 "Failed to configure TC queue mapping");
5437                         goto fail_msix_alloc;
5438                 }
5439
5440                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5441                 ctxt.info.valid_sections |=
5442                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5443                 /**
5444                  * Since the VSI is not created yet, only configure the
5445                  * parameters; the VSI is added below.
5446                  */
5447
5448                 i40e_config_qinq(hw, vsi);
5449         } else if (type == I40E_VSI_VMDQ2) {
5450                 memset(&ctxt, 0, sizeof(ctxt));
5451                 /*
5452                  * For other VSIs, the uplink_seid equals the uplink VSI's
5453                  * uplink_seid since they share the same VEB.
5454                  */
5455                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5456                 ctxt.pf_num = hw->pf_id;
5457                 ctxt.vf_num = 0;
5458                 ctxt.uplink_seid = vsi->uplink_seid;
5459                 ctxt.connection_type = 0x1;
5460                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5461
5462                 ctxt.info.valid_sections |=
5463                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5464                 /* user_param carries flag to enable loop back */
5465                 if (user_param) {
5466                         ctxt.info.switch_id =
5467                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5468                         ctxt.info.switch_id |=
5469                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5470                 }
5471
5472                 /* Configure port/vlan */
5473                 ctxt.info.valid_sections |=
5474                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5475                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5476                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5477                                                 I40E_DEFAULT_TCMAP);
5478                 if (ret != I40E_SUCCESS) {
5479                         PMD_DRV_LOG(ERR,
5480                                 "Failed to configure TC queue mapping");
5481                         goto fail_msix_alloc;
5482                 }
5483                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5484                 ctxt.info.valid_sections |=
5485                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5486         } else if (type == I40E_VSI_FDIR) {
5487                 memset(&ctxt, 0, sizeof(ctxt));
5488                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5489                 ctxt.pf_num = hw->pf_id;
5490                 ctxt.vf_num = 0;
5491                 ctxt.uplink_seid = vsi->uplink_seid;
5492                 ctxt.connection_type = 0x1;     /* regular data port */
5493                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5494                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5495                                                 I40E_DEFAULT_TCMAP);
5496                 if (ret != I40E_SUCCESS) {
5497                         PMD_DRV_LOG(ERR,
5498                                 "Failed to configure TC queue mapping.");
5499                         goto fail_msix_alloc;
5500                 }
5501                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5502                 ctxt.info.valid_sections |=
5503                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5504         } else {
5505                 PMD_DRV_LOG(ERR, "VSI: this VSI type is not supported yet");
5506                 goto fail_msix_alloc;
5507         }
5508
5509         if (vsi->type != I40E_VSI_MAIN) {
5510                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5511                 if (ret != I40E_SUCCESS) {
5512                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5513                                     hw->aq.asq_last_status);
5514                         goto fail_msix_alloc;
5515                 }
5516                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5517                 vsi->info.valid_sections = 0;
5518                 vsi->seid = ctxt.seid;
5519                 vsi->vsi_id = ctxt.vsi_number;
5520                 vsi->sib_vsi_list.vsi = vsi;
5521                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5522                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5523                                           &vsi->sib_vsi_list, list);
5524                 } else {
5525                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5526                                           &vsi->sib_vsi_list, list);
5527                 }
5528         }
5529
5530         /* MAC/VLAN configuration */
5531         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5532         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5533
5534         ret = i40e_vsi_add_mac(vsi, &filter);
5535         if (ret != I40E_SUCCESS) {
5536                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5537                 goto fail_msix_alloc;
5538         }
5539
5540         /* Get VSI BW information */
5541         i40e_vsi_get_bw_config(vsi);
5542         return vsi;
5543 fail_msix_alloc:
5544         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5545 fail_queue_alloc:
5546         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5547 fail_mem:
5548         rte_free(vsi);
5549         return NULL;
5550 }
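
/*
 * A minimal usage sketch of i40e_vsi_setup() (the helper name is
 * hypothetical, not from this file): creating the FDIR VSI under the
 * main VSI.  Per the comment in the function above, queue pair 0 must
 * already be reserved so the FDIR VSI lands on it; i40e_pf_setup()
 * below shows where that reservation happens.
 */
static int
example_setup_fdir_vsi(struct i40e_pf *pf)
{
        struct i40e_vsi *vsi;

        vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
        if (vsi == NULL) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR VSI");
                return -ENODEV;
        }
        return 0;
}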
5551
5552 /* Configure vlan filter on or off */
5553 int
5554 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5555 {
5556         int i, num;
5557         struct i40e_mac_filter *f;
5558         void *temp;
5559         struct i40e_mac_filter_info *mac_filter;
5560         enum rte_mac_filter_type desired_filter;
5561         int ret = I40E_SUCCESS;
5562
5563         if (on) {
5564                 /* Filter to match MAC and VLAN */
5565                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5566         } else {
5567                 /* Filter to match only MAC */
5568                 desired_filter = RTE_MAC_PERFECT_MATCH;
5569         }
5570
5571         num = vsi->mac_num;
5572
5573         mac_filter = rte_zmalloc("mac_filter_info_data",
5574                                  num * sizeof(*mac_filter), 0);
5575         if (mac_filter == NULL) {
5576                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5577                 return I40E_ERR_NO_MEMORY;
5578         }
5579
5580         i = 0;
5581
5582         /* Remove all existing mac */
5583         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5584                 mac_filter[i] = f->mac_info;
5585                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5586                 if (ret) {
5587                         PMD_DRV_LOG(ERR, "Failed to %s VLAN filtering on VSI",
5588                                     on ? "enable" : "disable");
5589                         goto DONE;
5590                 }
5591                 i++;
5592         }
5593
5594         /* Override with new filter */
5595         for (i = 0; i < num; i++) {
5596                 mac_filter[i].filter_type = desired_filter;
5597                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5598                 if (ret) {
5599                         PMD_DRV_LOG(ERR, "Failed to %s VLAN filtering on VSI",
5600                                     on ? "enable" : "disable");
5601                         goto DONE;
5602                 }
5603         }
5604
5605 DONE:
5606         rte_free(mac_filter);
5607         return ret;
5608 }
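
/*
 * Usage sketch, assuming a valid pf pointer: this is the kind of call
 * the VLAN offload path makes when filtering is toggled.  The function
 * above swaps every MAC filter on the VSI between the MAC-only and
 * MAC+VLAN perfect-match types.
 */
static void
example_toggle_vlan_filter(struct i40e_pf *pf, bool on) /* hypothetical */
{
        if (i40e_vsi_config_vlan_filter(pf->main_vsi, on) != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to %s VLAN filtering",
                            on ? "enable" : "disable");
}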
5609
5610 /* Configure vlan stripping on or off */
5611 int
5612 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5613 {
5614         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5615         struct i40e_vsi_context ctxt;
5616         uint8_t vlan_flags;
5617         int ret = I40E_SUCCESS;
5618
5619         /* Check if it has been already on or off */
5620         if (vsi->info.valid_sections &
5621                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5622                 if (on) {
5623                         if ((vsi->info.port_vlan_flags &
5624                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5625                                 return 0; /* already on */
5626                 } else {
5627                         if ((vsi->info.port_vlan_flags &
5628                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5629                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5630                                 return 0; /* already off */
5631                 }
5632         }
5633
5634         if (on)
5635                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5636         else
5637                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5638         vsi->info.valid_sections =
5639                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5640         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5641         vsi->info.port_vlan_flags |= vlan_flags;
5642         ctxt.seid = vsi->seid;
5643         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5644         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5645         if (ret)
5646                 PMD_DRV_LOG(INFO, "Failed to %s VLAN stripping on VSI",
5647                             on ? "enable" : "disable");
5648
5649         return ret;
5650 }
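
/*
 * Note on the "already on/off" checks above: the two EMOD bits of
 * port_vlan_flags encode the strip mode.  With both bits clear
 * (I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH) stripping is enabled; with both
 * bits set (I40E_AQ_VSI_PVLAN_EMOD_NOTHING) the packet is left
 * untouched.  That is why "on" tests the field for all-zero and "off"
 * tests it for the full mask.
 */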
5651
5652 static int
5653 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5654 {
5655         struct rte_eth_dev_data *data = dev->data;
5656         int ret;
5657         int mask = 0;
5658
5659         /* Apply vlan offload setting */
5660         mask = ETH_VLAN_STRIP_MASK |
5661                ETH_VLAN_FILTER_MASK |
5662                ETH_VLAN_EXTEND_MASK;
5663         ret = i40e_vlan_offload_set(dev, mask);
5664         if (ret) {
5665                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5666                 return ret;
5667         }
5668
5669         /* Apply pvid setting */
5670         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5671                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5672         if (ret)
5673                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5674
5675         return ret;
5676 }
5677
5678 static int
5679 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5680 {
5681         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5682
5683         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5684 }
5685
5686 static int
5687 i40e_update_flow_control(struct i40e_hw *hw)
5688 {
5689 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5690         struct i40e_link_status link_status;
5691         uint32_t rxfc = 0, txfc = 0, reg;
5692         uint8_t an_info;
5693         int ret;
5694
5695         memset(&link_status, 0, sizeof(link_status));
5696         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5697         if (ret != I40E_SUCCESS) {
5698                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5699                 goto write_reg; /* Disable flow control */
5700         }
5701
5702         an_info = hw->phy.link_info.an_info;
5703         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5704                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5705                 ret = I40E_ERR_NOT_READY;
5706                 goto write_reg; /* Disable flow control */
5707         }
5708         /**
5709          * If link auto negotiation is enabled, flow control needs to
5710          * be configured according to it
5711          */
5712         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5713         case I40E_LINK_PAUSE_RXTX:
5714                 rxfc = 1;
5715                 txfc = 1;
5716                 hw->fc.current_mode = I40E_FC_FULL;
5717                 break;
5718         case I40E_AQ_LINK_PAUSE_RX:
5719                 rxfc = 1;
5720                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5721                 break;
5722         case I40E_AQ_LINK_PAUSE_TX:
5723                 txfc = 1;
5724                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5725                 break;
5726         default:
5727                 hw->fc.current_mode = I40E_FC_NONE;
5728                 break;
5729         }
5730
5731 write_reg:
5732         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5733                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5734         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5735         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5736         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5737         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5738
5739         return ret;
5740 }
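
/*
 * The same AN pause bits drive DPDK's generic flow-control reporting.
 * A minimal sketch of that mapping (an illustration, not the PMD's
 * actual flow_ctrl_get implementation):
 */
static enum rte_eth_fc_mode
example_fc_mode_from_an(uint8_t an_info) /* hypothetical helper */
{
        bool rx = !!(an_info & I40E_AQ_LINK_PAUSE_RX);
        bool tx = !!(an_info & I40E_AQ_LINK_PAUSE_TX);

        if (rx && tx)
                return RTE_FC_FULL;
        if (rx)
                return RTE_FC_RX_PAUSE;
        if (tx)
                return RTE_FC_TX_PAUSE;
        return RTE_FC_NONE;
}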
5741
5742 /* PF setup */
5743 static int
5744 i40e_pf_setup(struct i40e_pf *pf)
5745 {
5746         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5747         struct i40e_filter_control_settings settings;
5748         struct i40e_vsi *vsi;
5749         int ret;
5750
5751         /* Clear all stats counters */
5752         pf->offset_loaded = FALSE;
5753         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5754         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5755         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5756         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5757
5758         ret = i40e_pf_get_switch_config(pf);
5759         if (ret != I40E_SUCCESS) {
5760                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5761                 return ret;
5762         }
5763         if (pf->flags & I40E_FLAG_FDIR) {
5764                 /* Allocate queues first so that FDIR gets queue pair 0 */
5765                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5766                 if (ret != I40E_FDIR_QUEUE_ID) {
5767                         PMD_DRV_LOG(ERR,
5768                                 "queue allocation failed for FDIR: ret=%d",
5769                                 ret);
5770                         pf->flags &= ~I40E_FLAG_FDIR;
5771                 }
5772         }
5773         /*  main VSI setup */
5774         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5775         if (!vsi) {
5776                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5777                 return I40E_ERR_NOT_READY;
5778         }
5779         pf->main_vsi = vsi;
5780
5781         /* Configure filter control */
5782         memset(&settings, 0, sizeof(settings));
5783         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5784                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5785         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5786                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5787         else {
5788                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5789                         hw->func_caps.rss_table_size);
5790                 return I40E_ERR_PARAM;
5791         }
5792         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5793                 hw->func_caps.rss_table_size);
5794         pf->hash_lut_size = hw->func_caps.rss_table_size;
5795
5796         /* Enable ethtype and macvlan filters */
5797         settings.enable_ethtype = TRUE;
5798         settings.enable_macvlan = TRUE;
5799         ret = i40e_set_filter_control(hw, &settings);
5800         if (ret)
5801                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5802                                                                 ret);
5803
5804         /* Update flow control according to the auto negotiation */
5805         i40e_update_flow_control(hw);
5806
5807         return I40E_SUCCESS;
5808 }
5809
5810 int
5811 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5812 {
5813         uint32_t reg;
5814         uint16_t j;
5815
5816         /**
5817          * Set or clear TX Queue Disable flags,
5818          * as required by the hardware.
5819          */
5820         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5821         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5822
5823         /* Wait until the request is finished */
5824         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5825                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5826                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5827                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5828                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5829                                                         & 0x1))) {
5830                         break;
5831                 }
5832         }
5833         if (on) {
5834                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5835                         return I40E_SUCCESS; /* already on, skip next steps */
5836
5837                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5838                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5839         } else {
5840                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5841                         return I40E_SUCCESS; /* already off, skip next steps */
5842                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5843         }
5844         /* Write the register */
5845         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5846         /* Check the result */
5847         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5848                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5849                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5850                 if (on) {
5851                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5852                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5853                                 break;
5854                 } else {
5855                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5856                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5857                                 break;
5858                 }
5859         }
5860         /* Check if it is timeout */
5861         if (j >= I40E_CHK_Q_ENA_COUNT) {
5862                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5863                             (on ? "enable" : "disable"), q_idx);
5864                 return I40E_ERR_TIMEOUT;
5865         }
5866
5867         return I40E_SUCCESS;
5868 }
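
/*
 * The enable and disable paths above share one pattern: set or clear
 * QENA_REQ, then poll until QENA_STAT agrees, bounded by
 * I40E_CHK_Q_ENA_COUNT polls.  A condensed sketch of that wait loop
 * (illustrative helper, not part of the driver):
 */
static int
example_wait_queue_state(struct i40e_hw *hw, uint32_t reg_addr,
                         uint32_t req_mask, uint32_t stat_mask, bool on)
{
        uint16_t j;
        uint32_t reg;

        for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
                rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
                reg = I40E_READ_REG(hw, reg_addr);
                if (on && (reg & req_mask) && (reg & stat_mask))
                        return I40E_SUCCESS;
                if (!on && !(reg & req_mask) && !(reg & stat_mask))
                        return I40E_SUCCESS;
        }
        return I40E_ERR_TIMEOUT;
}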
5869
5870 /* Switch on or off the tx queues */
5871 static int
5872 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5873 {
5874         struct rte_eth_dev_data *dev_data = pf->dev_data;
5875         struct i40e_tx_queue *txq;
5876         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5877         uint16_t i;
5878         int ret;
5879
5880         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5881                 txq = dev_data->tx_queues[i];
5882                 /* Don't operate the queue if not configured or
5883                  * if it is flagged for deferred (per-queue) start */
5884                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5885                         continue;
5886                 if (on)
5887                         ret = i40e_dev_tx_queue_start(dev, i);
5888                 else
5889                         ret = i40e_dev_tx_queue_stop(dev, i);
5890                 if (ret != I40E_SUCCESS)
5891                         return ret;
5892         }
5893
5894         return I40E_SUCCESS;
5895 }
5896
5897 int
5898 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5899 {
5900         uint32_t reg;
5901         uint16_t j;
5902
5903         /* Wait until the request is finished */
5904         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5905                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5906                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5907                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5908                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5909                         break;
5910         }
5911
5912         if (on) {
5913                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5914                         return I40E_SUCCESS; /* Already on, skip next steps */
5915                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5916         } else {
5917                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5918                         return I40E_SUCCESS; /* Already off, skip next steps */
5919                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5920         }
5921
5922         /* Write the register */
5923         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5924         /* Check the result */
5925         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5926                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5927                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5928                 if (on) {
5929                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5930                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5931                                 break;
5932                 } else {
5933                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5934                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5935                                 break;
5936                 }
5937         }
5938
5939         /* Check if it is timeout */
5940         if (j >= I40E_CHK_Q_ENA_COUNT) {
5941                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5942                             (on ? "enable" : "disable"), q_idx);
5943                 return I40E_ERR_TIMEOUT;
5944         }
5945
5946         return I40E_SUCCESS;
5947 }
5948 /* Switch on or off the rx queues */
5949 static int
5950 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5951 {
5952         struct rte_eth_dev_data *dev_data = pf->dev_data;
5953         struct i40e_rx_queue *rxq;
5954         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5955         uint16_t i;
5956         int ret;
5957
5958         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5959                 rxq = dev_data->rx_queues[i];
5960                 /* Don't operate the queue if not configured or
5961                  * if it is flagged for deferred (per-queue) start */
5962                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5963                         continue;
5964                 if (on)
5965                         ret = i40e_dev_rx_queue_start(dev, i);
5966                 else
5967                         ret = i40e_dev_rx_queue_stop(dev, i);
5968                 if (ret != I40E_SUCCESS)
5969                         return ret;
5970         }
5971
5972         return I40E_SUCCESS;
5973 }
5974
5975 /* Switch on or off all the rx/tx queues */
5976 int
5977 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5978 {
5979         int ret;
5980
5981         if (on) {
5982                 /* enable rx queues before enabling tx queues */
5983                 ret = i40e_dev_switch_rx_queues(pf, on);
5984                 if (ret) {
5985                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5986                         return ret;
5987                 }
5988                 ret = i40e_dev_switch_tx_queues(pf, on);
5989         } else {
5990                 /* Stop tx queues before stopping rx queues */
5991                 ret = i40e_dev_switch_tx_queues(pf, on);
5992                 if (ret) {
5993                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5994                         return ret;
5995                 }
5996                 ret = i40e_dev_switch_rx_queues(pf, on);
5997         }
5998
5999         return ret;
6000 }
6001
6002 /* Initialize VSI for TX */
6003 static int
6004 i40e_dev_tx_init(struct i40e_pf *pf)
6005 {
6006         struct rte_eth_dev_data *data = pf->dev_data;
6007         uint16_t i;
6008         int ret = I40E_SUCCESS;
6009         struct i40e_tx_queue *txq;
6010
6011         for (i = 0; i < data->nb_tx_queues; i++) {
6012                 txq = data->tx_queues[i];
6013                 if (!txq || !txq->q_set)
6014                         continue;
6015                 ret = i40e_tx_queue_init(txq);
6016                 if (ret != I40E_SUCCESS)
6017                         break;
6018         }
6019         if (ret == I40E_SUCCESS)
6020                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6021                                      ->eth_dev);
6022
6023         return ret;
6024 }
6025
6026 /* Initialize VSI for RX */
6027 static int
6028 i40e_dev_rx_init(struct i40e_pf *pf)
6029 {
6030         struct rte_eth_dev_data *data = pf->dev_data;
6031         int ret = I40E_SUCCESS;
6032         uint16_t i;
6033         struct i40e_rx_queue *rxq;
6034
6035         i40e_pf_config_mq_rx(pf);
6036         for (i = 0; i < data->nb_rx_queues; i++) {
6037                 rxq = data->rx_queues[i];
6038                 if (!rxq || !rxq->q_set)
6039                         continue;
6040
6041                 ret = i40e_rx_queue_init(rxq);
6042                 if (ret != I40E_SUCCESS) {
6043                         PMD_DRV_LOG(ERR,
6044                                 "Failed to do RX queue initialization");
6045                         break;
6046                 }
6047         }
6048         if (ret == I40E_SUCCESS)
6049                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6050                                      ->eth_dev);
6051
6052         return ret;
6053 }
6054
6055 static int
6056 i40e_dev_rxtx_init(struct i40e_pf *pf)
6057 {
6058         int err;
6059
6060         err = i40e_dev_tx_init(pf);
6061         if (err) {
6062                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6063                 return err;
6064         }
6065         err = i40e_dev_rx_init(pf);
6066         if (err) {
6067                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6068                 return err;
6069         }
6070
6071         return err;
6072 }
6073
6074 static int
6075 i40e_vmdq_setup(struct rte_eth_dev *dev)
6076 {
6077         struct rte_eth_conf *conf = &dev->data->dev_conf;
6078         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6079         int i, err, conf_vsis, j, loop;
6080         struct i40e_vsi *vsi;
6081         struct i40e_vmdq_info *vmdq_info;
6082         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6083         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6084
6085         /*
6086          * Disable interrupts to avoid messages from VFs. Furthermore, this
6087          * avoids race conditions in VSI creation/destroy.
6088          */
6089         i40e_pf_disable_irq0(hw);
6090
6091         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6092                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6093                 return -ENOTSUP;
6094         }
6095
6096         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6097         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6098                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support: %u",
6099                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6100                         pf->max_nb_vmdq_vsi);
6101                 return -ENOTSUP;
6102         }
6103
6104         if (pf->vmdq != NULL) {
6105                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6106                 return 0;
6107         }
6108
6109         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6110                                 sizeof(*vmdq_info) * conf_vsis, 0);
6111
6112         if (pf->vmdq == NULL) {
6113                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6114                 return -ENOMEM;
6115         }
6116
6117         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6118
6119         /* Create VMDQ VSI */
6120         for (i = 0; i < conf_vsis; i++) {
6121                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6122                                 vmdq_conf->enable_loop_back);
6123                 if (vsi == NULL) {
6124                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6125                         err = -1;
6126                         goto err_vsi_setup;
6127                 }
6128                 vmdq_info = &pf->vmdq[i];
6129                 vmdq_info->pf = pf;
6130                 vmdq_info->vsi = vsi;
6131         }
6132         pf->nb_cfg_vmdq_vsi = conf_vsis;
6133
6134         /* Configure Vlan */
6135         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6136         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6137                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6138                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6139                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6140                                         vmdq_conf->pool_map[i].vlan_id, j);
6141
6142                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6143                                                 vmdq_conf->pool_map[i].vlan_id);
6144                                 if (err) {
6145                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6146                                         err = -1;
6147                                         goto err_vsi_setup;
6148                                 }
6149                         }
6150                 }
6151         }
6152
6153         i40e_pf_enable_irq0(hw);
6154
6155         return 0;
6156
6157 err_vsi_setup:
6158         for (i = 0; i < conf_vsis; i++)
6159                 if (pf->vmdq[i].vsi == NULL)
6160                         break;
6161                 else
6162                         i40e_vsi_release(pf->vmdq[i].vsi);
6163
6164         rte_free(pf->vmdq);
6165         pf->vmdq = NULL;
6166         i40e_pf_enable_irq0(hw);
6167         return err;
6168 }
6169
6170 static void
6171 i40e_stat_update_32(struct i40e_hw *hw,
6172                    uint32_t reg,
6173                    bool offset_loaded,
6174                    uint64_t *offset,
6175                    uint64_t *stat)
6176 {
6177         uint64_t new_data;
6178
6179         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6180         if (!offset_loaded)
6181                 *offset = new_data;
6182
6183         if (new_data >= *offset)
6184                 *stat = (uint64_t)(new_data - *offset);
6185         else
6186                 *stat = (uint64_t)((new_data +
6187                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6188 }
6189
6190 static void
6191 i40e_stat_update_48(struct i40e_hw *hw,
6192                    uint32_t hireg,
6193                    uint32_t loreg,
6194                    bool offset_loaded,
6195                    uint64_t *offset,
6196                    uint64_t *stat)
6197 {
6198         uint64_t new_data;
6199
6200         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6201         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6202                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6203
6204         if (!offset_loaded)
6205                 *offset = new_data;
6206
6207         if (new_data >= *offset)
6208                 *stat = new_data - *offset;
6209         else
6210                 *stat = (uint64_t)((new_data +
6211                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6212
6213         *stat &= I40E_48_BIT_MASK;
6214 }
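
/*
 * Worked example of the rollover handling above: if the snapshot in
 * *offset was taken near the top of the 48-bit range and the counter
 * has since wrapped, new_data < *offset, so one full 2^48 period is
 * added back before subtracting:
 *
 *   *offset  = 0xFFFFFFFFFFF0  (snapshot before wrap)
 *   new_data = 0x000000000010  (reading after wrap)
 *   *stat    = (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20
 *
 * i.e. 32 units really elapsed.  The final mask keeps the result
 * within 48 bits.
 */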
6215
6216 /* Disable IRQ0 */
6217 void
6218 i40e_pf_disable_irq0(struct i40e_hw *hw)
6219 {
6220         /* Disable all interrupt types */
6221         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6222                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6223         I40E_WRITE_FLUSH(hw);
6224 }
6225
6226 /* Enable IRQ0 */
6227 void
6228 i40e_pf_enable_irq0(struct i40e_hw *hw)
6229 {
6230         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6231                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6232                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6233                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6234         I40E_WRITE_FLUSH(hw);
6235 }
6236
6237 static void
6238 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6239 {
6240         /* read pending request and disable first */
6241         i40e_pf_disable_irq0(hw);
6242         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6243         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6244                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6245
6246         if (no_queue)
6247                 /* Link no queues with irq0 */
6248                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6249                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6250 }
6251
6252 static void
6253 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6254 {
6255         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6256         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6257         int i;
6258         uint16_t abs_vf_id;
6259         uint32_t index, offset, val;
6260
6261         if (!pf->vfs)
6262                 return;
6263         /**
6264          * Try to find which VF triggered a reset; use the absolute VF id
6265          * to access since the register is a global one.
6266          */
6267         for (i = 0; i < pf->vf_num; i++) {
6268                 abs_vf_id = hw->func_caps.vf_base_id + i;
6269                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6270                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6271                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6272                 /* VFR event occurred */
6273                 if (val & (0x1 << offset)) {
6274                         int ret;
6275
6276                         /* Clear the event first */
6277                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6278                                                         (0x1 << offset));
6279                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6280                         /**
6281                          * Only notify that a VF reset event occurred;
6282                          * don't trigger another SW reset.
6283                          */
6284                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6285                         if (ret != I40E_SUCCESS)
6286                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6287                 }
6288         }
6289 }
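
/*
 * Quick illustration of the VFLRSTAT indexing above: the status is a
 * global bitmap with one bit per absolute VF id, split across 32-bit
 * registers.  With a hypothetical vf_base_id of 64 and local VF 3:
 *
 *   abs_vf_id = 64 + 3 = 67
 *   index     = 67 / 32 = 2   ->  read I40E_GLGEN_VFLRSTAT(2)
 *   offset    = 67 % 32 = 3   ->  test and clear bit (0x1 << 3)
 */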
6290
6291 static void
6292 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6293 {
6294         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6295         int i;
6296
6297         for (i = 0; i < pf->vf_num; i++)
6298                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6299 }
6300
6301 static void
6302 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6303 {
6304         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6305         struct i40e_arq_event_info info;
6306         uint16_t pending, opcode;
6307         int ret;
6308
6309         info.buf_len = I40E_AQ_BUF_SZ;
6310         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6311         if (!info.msg_buf) {
6312                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6313                 return;
6314         }
6315
6316         pending = 1;
6317         while (pending) {
6318                 ret = i40e_clean_arq_element(hw, &info, &pending);
6319
6320                 if (ret != I40E_SUCCESS) {
6321                         PMD_DRV_LOG(INFO,
6322                                 "Failed to read msg from AdminQ, aq_err: %u",
6323                                 hw->aq.asq_last_status);
6324                         break;
6325                 }
6326                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6327
6328                 switch (opcode) {
6329                 case i40e_aqc_opc_send_msg_to_pf:
6330                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6331                         i40e_pf_host_handle_vf_msg(dev,
6332                                         rte_le_to_cpu_16(info.desc.retval),
6333                                         rte_le_to_cpu_32(info.desc.cookie_high),
6334                                         rte_le_to_cpu_32(info.desc.cookie_low),
6335                                         info.msg_buf,
6336                                         info.msg_len);
6337                         break;
6338                 case i40e_aqc_opc_get_link_status:
6339                         ret = i40e_dev_link_update(dev, 0);
6340                         if (!ret)
6341                                 _rte_eth_dev_callback_process(dev,
6342                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6343                         break;
6344                 default:
6345                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6346                                     opcode);
6347                         break;
6348                 }
6349         }
6350         rte_free(info.msg_buf);
6351 }
6352
6353 /**
6354  * Interrupt handler triggered by the NIC for handling
6355  * a specific interrupt.
6356  *
6357  * @param handle
6358  *  Pointer to interrupt handle.
6359  * @param param
6360  *  The address of the parameter (struct rte_eth_dev *) registered before.
6361  *
6362  * @return
6363  *  void
6364  */
6365 static void
6366 i40e_dev_interrupt_handler(void *param)
6367 {
6368         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6369         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6370         uint32_t icr0;
6371
6372         /* Disable interrupt */
6373         i40e_pf_disable_irq0(hw);
6374
6375         /* read out interrupt causes */
6376         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6377
6378         /* No interrupt event indicated */
6379         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6380                 PMD_DRV_LOG(INFO, "No interrupt event");
6381                 goto done;
6382         }
6383         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6384                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6385         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6386                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6387         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6388                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6389         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6390                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6391         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6392                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6393         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6394                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6395         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6396                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6397
6398         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6399                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6400                 i40e_dev_handle_vfr_event(dev);
6401         }
6402         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6403                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6404                 i40e_dev_handle_aq_msg(dev);
6405         }
6406
6407 done:
6408         /* Enable interrupt */
6409         i40e_pf_enable_irq0(hw);
6410         rte_intr_enable(dev->intr_handle);
6411 }
6412
6413 int
6414 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6415                          struct i40e_macvlan_filter *filter,
6416                          int total)
6417 {
6418         int ele_num, ele_buff_size;
6419         int num, actual_num, i;
6420         uint16_t flags;
6421         int ret = I40E_SUCCESS;
6422         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6423         struct i40e_aqc_add_macvlan_element_data *req_list;
6424
6425         if (filter == NULL || total == 0)
6426                 return I40E_ERR_PARAM;
6427         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6428         ele_buff_size = hw->aq.asq_buf_size;
6429
6430         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6431         if (req_list == NULL) {
6432                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6433                 return I40E_ERR_NO_MEMORY;
6434         }
6435
6436         num = 0;
6437         do {
6438                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6439                 memset(req_list, 0, ele_buff_size);
6440
6441                 for (i = 0; i < actual_num; i++) {
6442                         rte_memcpy(req_list[i].mac_addr,
6443                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6444                         req_list[i].vlan_tag =
6445                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6446
6447                         switch (filter[num + i].filter_type) {
6448                         case RTE_MAC_PERFECT_MATCH:
6449                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6450                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6451                                 break;
6452                         case RTE_MACVLAN_PERFECT_MATCH:
6453                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6454                                 break;
6455                         case RTE_MAC_HASH_MATCH:
6456                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6457                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6458                                 break;
6459                         case RTE_MACVLAN_HASH_MATCH:
6460                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6461                                 break;
6462                         default:
6463                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6464                                 ret = I40E_ERR_PARAM;
6465                                 goto DONE;
6466                         }
6467
6468                         req_list[i].queue_number = 0;
6469
6470                         req_list[i].flags = rte_cpu_to_le_16(flags);
6471                 }
6472
6473                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6474                                                 actual_num, NULL);
6475                 if (ret != I40E_SUCCESS) {
6476                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6477                         goto DONE;
6478                 }
6479                 num += actual_num;
6480         } while (num < total);
6481
6482 DONE:
6483         rte_free(req_list);
6484         return ret;
6485 }
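
/*
 * The batching above sizes each AdminQ command to fit the ASQ buffer.
 * With hypothetical numbers, asq_buf_size = 4096 and a 16-byte
 * element give ele_num = 256, so adding 600 filters issues three
 * commands carrying 256, 256 and 88 elements respectively.
 */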
6486
6487 int
6488 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6489                             struct i40e_macvlan_filter *filter,
6490                             int total)
6491 {
6492         int ele_num, ele_buff_size;
6493         int num, actual_num, i;
6494         uint16_t flags;
6495         int ret = I40E_SUCCESS;
6496         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6497         struct i40e_aqc_remove_macvlan_element_data *req_list;
6498
6499         if (filter == NULL || total == 0)
6500                 return I40E_ERR_PARAM;
6501
6502         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6503         ele_buff_size = hw->aq.asq_buf_size;
6504
6505         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6506         if (req_list == NULL) {
6507                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
6508                 return I40E_ERR_NO_MEMORY;
6509         }
6510
6511         num = 0;
6512         do {
6513                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6514                 memset(req_list, 0, ele_buff_size);
6515
6516                 for (i = 0; i < actual_num; i++) {
6517                         rte_memcpy(req_list[i].mac_addr,
6518                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6519                         req_list[i].vlan_tag =
6520                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6521
6522                         switch (filter[num + i].filter_type) {
6523                         case RTE_MAC_PERFECT_MATCH:
6524                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6525                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6526                                 break;
6527                         case RTE_MACVLAN_PERFECT_MATCH:
6528                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6529                                 break;
6530                         case RTE_MAC_HASH_MATCH:
6531                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6532                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6533                                 break;
6534                         case RTE_MACVLAN_HASH_MATCH:
6535                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6536                                 break;
6537                         default:
6538                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6539                                 ret = I40E_ERR_PARAM;
6540                                 goto DONE;
6541                         }
6542                         req_list[i].flags = rte_cpu_to_le_16(flags);
6543                 }
6544
6545                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6546                                                 actual_num, NULL);
6547                 if (ret != I40E_SUCCESS) {
6548                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6549                         goto DONE;
6550                 }
6551                 num += actual_num;
6552         } while (num < total);
6553
6554 DONE:
6555         rte_free(req_list);
6556         return ret;
6557 }
6558
6559 /* Find out specific MAC filter */
6560 static struct i40e_mac_filter *
6561 i40e_find_mac_filter(struct i40e_vsi *vsi,
6562                          struct ether_addr *macaddr)
6563 {
6564         struct i40e_mac_filter *f;
6565
6566         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6567                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6568                         return f;
6569         }
6570
6571         return NULL;
6572 }
6573
6574 static bool
6575 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6576                          uint16_t vlan_id)
6577 {
6578         uint32_t vid_idx, vid_bit;
6579
6580         if (vlan_id > ETH_VLAN_ID_MAX)
6581                 return 0;
6582
6583         vid_idx = I40E_VFTA_IDX(vlan_id);
6584         vid_bit = I40E_VFTA_BIT(vlan_id);
6585
6586         if (vsi->vfta[vid_idx] & vid_bit)
6587                 return 1;
6588         else
6589                 return 0;
6590 }
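
/*
 * Illustrative note (not part of the upstream source): the VFTA is a
 * bitmap of 4096 VLAN IDs stored in 32-bit words, so a VLAN ID maps to a
 * word index plus a bit within that word. For example, assuming the usual
 * I40E_VFTA_IDX/I40E_VFTA_BIT definitions:
 *
 *     vlan_id = 100:
 *         vid_idx = 100 / 32 = 3
 *         vid_bit = 1 << (100 % 32) = 1 << 4
 */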
6591
6592 static void
6593 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6594                        uint16_t vlan_id, bool on)
6595 {
6596         uint32_t vid_idx, vid_bit;
6597
6598         vid_idx = I40E_VFTA_IDX(vlan_id);
6599         vid_bit = I40E_VFTA_BIT(vlan_id);
6600
6601         if (on)
6602                 vsi->vfta[vid_idx] |= vid_bit;
6603         else
6604                 vsi->vfta[vid_idx] &= ~vid_bit;
6605 }
6606
6607 void
6608 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6609                      uint16_t vlan_id, bool on)
6610 {
6611         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6612         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6613         int ret;
6614
6615         if (vlan_id > ETH_VLAN_ID_MAX)
6616                 return;
6617
6618         i40e_store_vlan_filter(vsi, vlan_id, on);
6619
6620         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6621                 return;
6622
6623         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6624
6625         if (on) {
6626                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6627                                        &vlan_data, 1, NULL);
6628                 if (ret != I40E_SUCCESS)
6629                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6630         } else {
6631                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6632                                           &vlan_data, 1, NULL);
6633                 if (ret != I40E_SUCCESS)
6634                         PMD_DRV_LOG(ERR,
6635                                     "Failed to remove vlan filter");
6636         }
6637 }
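
/*
 * Usage sketch (illustrative only): enabling then disabling a VLAN filter
 * on a VSI. Note the admin-queue command is only issued when VLAN
 * anti-spoof or VLAN filtering is on and vlan_id != 0; otherwise only the
 * software VFTA shadow is updated.
 *
 *     i40e_set_vlan_filter(vsi, 100, 1);     // set bit, maybe AQ add
 *     i40e_set_vlan_filter(vsi, 100, 0);     // clear bit, maybe AQ remove
 */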
6638
6639 /**
6640  * Find all vlan options for specific mac addr,
6641  * return with actual vlan found.
6642  */
6643 int
6644 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6645                            struct i40e_macvlan_filter *mv_f,
6646                            int num, struct ether_addr *addr)
6647 {
6648         int i;
6649         uint32_t j, k;
6650
6651         /**
6652          * Walk the VFTA bitmap directly instead of calling
6653          * i40e_find_vlan_filter(); more complex, but faster in a loop.
6654          */
6655         if (num < vsi->vlan_num)
6656                 return I40E_ERR_PARAM;
6657
6658         i = 0;
6659         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6660                 if (vsi->vfta[j]) {
6661                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6662                                 if (vsi->vfta[j] & (1 << k)) {
6663                                         if (i > num - 1) {
6664                                                 PMD_DRV_LOG(ERR,
6665                                                         "vlan number doesn't match");
6666                                                 return I40E_ERR_PARAM;
6667                                         }
6668                                         rte_memcpy(&mv_f[i].macaddr,
6669                                                         addr, ETH_ADDR_LEN);
6670                                         mv_f[i].vlan_id =
6671                                                 j * I40E_UINT32_BIT_SIZE + k;
6672                                         i++;
6673                                 }
6674                         }
6675                 }
6676         }
6677         return I40E_SUCCESS;
6678 }
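
/*
 * Illustrative note (not part of the upstream source): the nested loop
 * above recovers each stored VLAN ID from the bitmap as
 *
 *     vlan_id = j * I40E_UINT32_BIT_SIZE + k;   // word index * 32 + bit
 *
 * so with vfta[3] = 0x10 the only match is vlan_id = 3 * 32 + 4 = 100,
 * the inverse of the I40E_VFTA_IDX/I40E_VFTA_BIT mapping.
 */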
6679
6680 static inline int
6681 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6682                            struct i40e_macvlan_filter *mv_f,
6683                            int num,
6684                            uint16_t vlan)
6685 {
6686         int i = 0;
6687         struct i40e_mac_filter *f;
6688
6689         if (num < vsi->mac_num)
6690                 return I40E_ERR_PARAM;
6691
6692         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6693                 if (i > num - 1) {
6694                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
6695                         return I40E_ERR_PARAM;
6696                 }
6697                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6698                                 ETH_ADDR_LEN);
6699                 mv_f[i].vlan_id = vlan;
6700                 mv_f[i].filter_type = f->mac_info.filter_type;
6701                 i++;
6702         }
6703
6704         return I40E_SUCCESS;
6705 }
6706
6707 static int
6708 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6709 {
6710         int i, j, num;
6711         struct i40e_mac_filter *f;
6712         struct i40e_macvlan_filter *mv_f;
6713         int ret = I40E_SUCCESS;
6714
6715         if (vsi == NULL || vsi->mac_num == 0)
6716                 return I40E_ERR_PARAM;
6717
6718         /* Case that no vlan is set */
6719         if (vsi->vlan_num == 0)
6720                 num = vsi->mac_num;
6721         else
6722                 num = vsi->mac_num * vsi->vlan_num;
6723
6724         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6725         if (mv_f == NULL) {
6726                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6727                 return I40E_ERR_NO_MEMORY;
6728         }
6729
6730         i = 0;
6731         if (vsi->vlan_num == 0) {
6732                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6733                         rte_memcpy(&mv_f[i].macaddr,
6734                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6735                         mv_f[i].filter_type = f->mac_info.filter_type;
6736                         mv_f[i].vlan_id = 0;
6737                         i++;
6738                 }
6739         } else {
6740                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6741                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6742                                         vsi->vlan_num, &f->mac_info.mac_addr);
6743                         if (ret != I40E_SUCCESS)
6744                                 goto DONE;
6745                         for (j = i; j < i + vsi->vlan_num; j++)
6746                                 mv_f[j].filter_type = f->mac_info.filter_type;
6747                         i += vsi->vlan_num;
6748                 }
6749         }
6750
6751         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6752 DONE:
6753         rte_free(mv_f);
6754
6755         return ret;
6756 }
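
/*
 * Illustrative note (not part of the upstream source): the temporary
 * array above is sized for the full MAC x VLAN cross product, since every
 * MAC filter is expanded once per configured VLAN. For example, 2 MAC
 * addresses and 3 VLANs give num = 2 * 3 = 6 i40e_macvlan_filter entries.
 */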
6757
6758 int
6759 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6760 {
6761         struct i40e_macvlan_filter *mv_f;
6762         int mac_num;
6763         int ret = I40E_SUCCESS;
6764
6765         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6766                 return I40E_ERR_PARAM;
6767
6768         /* If it's already set, just return */
6769         if (i40e_find_vlan_filter(vsi, vlan))
6770                 return I40E_SUCCESS;
6771
6772         mac_num = vsi->mac_num;
6773
6774         if (mac_num == 0) {
6775                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6776                 return I40E_ERR_PARAM;
6777         }
6778
6779         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6780
6781         if (mv_f == NULL) {
6782                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6783                 return I40E_ERR_NO_MEMORY;
6784         }
6785
6786         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6787
6788         if (ret != I40E_SUCCESS)
6789                 goto DONE;
6790
6791         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6792
6793         if (ret != I40E_SUCCESS)
6794                 goto DONE;
6795
6796         i40e_set_vlan_filter(vsi, vlan, 1);
6797
6798         vsi->vlan_num++;
6799         ret = I40E_SUCCESS;
6800 DONE:
6801         rte_free(mv_f);
6802         return ret;
6803 }
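
/*
 * Usage sketch (illustrative only), assuming `vsi` already carries at
 * least one MAC filter; the call expands the new VLAN across all of the
 * VSI's MAC addresses:
 *
 *     int ret = i40e_vsi_add_vlan(vsi, 100);
 *     if (ret != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "failed to add vlan 100: %d", ret);
 */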
6804
6805 int
6806 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6807 {
6808         struct i40e_macvlan_filter *mv_f;
6809         int mac_num;
6810         int ret = I40E_SUCCESS;
6811
6812         /**
6813          * Vlan 0 is the generic filter for untagged packets
6814          * and can't be removed.
6815          */
6816         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6817                 return I40E_ERR_PARAM;
6818
6819         /* If it can't be found, return an error */
6820         if (!i40e_find_vlan_filter(vsi, vlan))
6821                 return I40E_ERR_PARAM;
6822
6823         mac_num = vsi->mac_num;
6824
6825         if (mac_num == 0) {
6826                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6827                 return I40E_ERR_PARAM;
6828         }
6829
6830         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6831
6832         if (mv_f == NULL) {
6833                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6834                 return I40E_ERR_NO_MEMORY;
6835         }
6836
6837         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6838
6839         if (ret != I40E_SUCCESS)
6840                 goto DONE;
6841
6842         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6843
6844         if (ret != I40E_SUCCESS)
6845                 goto DONE;
6846
6847         /* If this is the last vlan to remove, re-add all mac filters with vlan 0 */
6848         if (vsi->vlan_num == 1) {
6849                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6850                 if (ret != I40E_SUCCESS)
6851                         goto DONE;
6852
6853                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6854                 if (ret != I40E_SUCCESS)
6855                         goto DONE;
6856         }
6857
6858         i40e_set_vlan_filter(vsi, vlan, 0);
6859
6860         vsi->vlan_num--;
6861         ret = I40E_SUCCESS;
6862 DONE:
6863         rte_free(mv_f);
6864         return ret;
6865 }
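
/*
 * Illustrative note (not part of the upstream source): removing the last
 * VLAN is special-cased above. A sketch of the effect, assuming VLAN 100
 * is the only VLAN configured:
 *
 *     i40e_vsi_delete_vlan(vsi, 100);
 *     // all MAC/VLAN-100 filters are removed, then re-added with
 *     // VLAN 0 so untagged traffic still matches the MAC filters
 */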
6866
6867 int
6868 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6869 {
6870         struct i40e_mac_filter *f;
6871         struct i40e_macvlan_filter *mv_f;
6872         int i, vlan_num = 0;
6873         int ret = I40E_SUCCESS;
6874
6875         /* If the MAC address is already configured, nothing to do */
6876         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6877         if (f != NULL)
6878                 return I40E_SUCCESS;
6879         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6880                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6881
6882                 /**
6883                  * If vlan_num is 0, this is the first MAC being added;
6884                  * set the mask for vlan_id 0.
6885                  */
6886                 if (vsi->vlan_num == 0) {
6887                         i40e_set_vlan_filter(vsi, 0, 1);
6888                         vsi->vlan_num = 1;
6889                 }
6890                 vlan_num = vsi->vlan_num;
6891         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6892                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6893                 vlan_num = 1;
6894
6895         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6896         if (mv_f == NULL) {
6897                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6898                 return I40E_ERR_NO_MEMORY;
6899         }
6900
6901         for (i = 0; i < vlan_num; i++) {
6902                 mv_f[i].filter_type = mac_filter->filter_type;
6903                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6904                                 ETH_ADDR_LEN);
6905         }
6906
6907         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6908                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6909                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6910                                         &mac_filter->mac_addr);
6911                 if (ret != I40E_SUCCESS)
6912                         goto DONE;
6913         }
6914
6915         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6916         if (ret != I40E_SUCCESS)
6917                 goto DONE;
6918
6919         /* Add the mac addr into mac list */
6920         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6921         if (f == NULL) {
6922                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6923                 ret = I40E_ERR_NO_MEMORY;
6924                 goto DONE;
6925         }
6926         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6927                         ETH_ADDR_LEN);
6928         f->mac_info.filter_type = mac_filter->filter_type;
6929         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6930         vsi->mac_num++;
6931
6932         ret = I40E_SUCCESS;
6933 DONE:
6934         rte_free(mv_f);
6935
6936         return ret;
6937 }
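
/*
 * Usage sketch (illustrative only): adding a perfect-match MAC filter.
 * `some_addr` below is a made-up example value.
 *
 *     struct i40e_mac_filter_info mac_filter;
 *
 *     mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *     ether_addr_copy(&some_addr, &mac_filter.mac_addr);
 *     if (i40e_vsi_add_mac(vsi, &mac_filter) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "failed to add mac filter");
 */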
6938
6939 int
6940 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6941 {
6942         struct i40e_mac_filter *f;
6943         struct i40e_macvlan_filter *mv_f;
6944         int i, vlan_num;
6945         enum rte_mac_filter_type filter_type;
6946         int ret = I40E_SUCCESS;
6947
6948         /* Can't find it, return an error */
6949         f = i40e_find_mac_filter(vsi, addr);
6950         if (f == NULL)
6951                 return I40E_ERR_PARAM;
6952
6953         vlan_num = vsi->vlan_num;
6954         filter_type = f->mac_info.filter_type;
6955         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6956                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6957                 if (vlan_num == 0) {
6958                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6959                         return I40E_ERR_PARAM;
6960                 }
6961         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6962                         filter_type == RTE_MAC_HASH_MATCH)
6963                 vlan_num = 1;
6964
6965         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6966         if (mv_f == NULL) {
6967                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6968                 return I40E_ERR_NO_MEMORY;
6969         }
6970
6971         for (i = 0; i < vlan_num; i++) {
6972                 mv_f[i].filter_type = filter_type;
6973                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6974                                 ETH_ADDR_LEN);
6975         }
6976         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6977                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6978                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6979                 if (ret != I40E_SUCCESS)
6980                         goto DONE;
6981         }
6982
6983         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6984         if (ret != I40E_SUCCESS)
6985                 goto DONE;
6986
6987         /* Remove the mac addr from the mac list */
6988         TAILQ_REMOVE(&vsi->mac_list, f, next);
6989         rte_free(f);
6990         vsi->mac_num--;
6991
6992         ret = I40E_SUCCESS;
6993 DONE:
6994         rte_free(mv_f);
6995         return ret;
6996 }
6997
6998 /* Configure hash enable flags for RSS */
6999 uint64_t
7000 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7001 {
7002         uint64_t hena = 0;
7003         int i;
7004
7005         if (!flags)
7006                 return hena;
7007
7008         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7009                 if (flags & (1ULL << i))
7010                         hena |= adapter->pctypes_tbl[i];
7011         }
7012
7013         return hena;
7014 }
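
/*
 * Illustrative note (not part of the upstream source): i40e_config_hena()
 * maps rte_eth flow-type bits to hardware PCTYPE bits via pctypes_tbl.
 * For example, assuming the default table on a non-X722 device:
 *
 *     flags = 1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *     hena  = i40e_config_hena(adapter, flags);
 *     // hena now has the bit(s) for the IPv4/UDP PCTYPE(s) set,
 *     // ready to be written into the PFQF_HENA registers
 */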
7015
7016 /* Parse the hash enable flags */
7017 uint64_t
7018 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7019 {
7020         uint64_t rss_hf = 0;
7021         int i;
7022
7023         if (!flags)
7024                 return rss_hf;
7025
7026         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7027                 if (flags & adapter->pctypes_tbl[i])
7028                         rss_hf |= (1ULL << i);
7029         }
7030         return rss_hf;
7031 }
7032
7033 /* Disable RSS */
7034 static void
7035 i40e_pf_disable_rss(struct i40e_pf *pf)
7036 {
7037         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7038
7039         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7040         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7041         I40E_WRITE_FLUSH(hw);
7042 }
7043
7044 int
7045 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7046 {
7047         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7048         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7049         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7050                            I40E_VFQF_HKEY_MAX_INDEX :
7051                            I40E_PFQF_HKEY_MAX_INDEX;
7052         int ret = 0;
7053
7054         if (!key || key_len == 0) {
7055                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7056                 return 0;
7057         } else if (key_len != (key_idx + 1) *
7058                 sizeof(uint32_t)) {
7059                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7060                 return -EINVAL;
7061         }
7062
7063         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7064                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7065                         (struct i40e_aqc_get_set_rss_key_data *)key;
7066
7067                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7068                 if (ret)
7069                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7070         } else {
7071                 uint32_t *hash_key = (uint32_t *)key;
7072                 uint16_t i;
7073
7074                 if (vsi->type == I40E_VSI_SRIOV) {
7075                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7076                                 I40E_WRITE_REG(
7077                                         hw,
7078                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7079                                         hash_key[i]);
7080
7081                 } else {
7082                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7083                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7084                                                hash_key[i]);
7085                 }
7086                 I40E_WRITE_FLUSH(hw);
7087         }
7088
7089         return ret;
7090 }
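
/*
 * Illustrative note (not part of the upstream source): the accepted key
 * length is fixed by the size of the key register file. For a PF VSI,
 *
 *     key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)
 *
 * i.e. 13 32-bit words = 52 bytes with the usual register definitions;
 * any other non-zero length is rejected with -EINVAL.
 */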
7091
7092 static int
7093 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7094 {
7095         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7096         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7097         uint32_t reg;
7098         int ret;
7099
7100         if (!key || !key_len)
7101                 return -EINVAL;
7102
7103         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7104                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7105                         (struct i40e_aqc_get_set_rss_key_data *)key);
7106                 if (ret) {
7107                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7108                         return ret;
7109                 }
7110         } else {
7111                 uint32_t *key_dw = (uint32_t *)key;
7112                 uint16_t i;
7113
7114                 if (vsi->type == I40E_VSI_SRIOV) {
7115                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7116                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7117                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7118                         }
7119                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7120                                    sizeof(uint32_t);
7121                 } else {
7122                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7123                                 reg = I40E_PFQF_HKEY(i);
7124                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7125                         }
7126                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7127                                    sizeof(uint32_t);
7128                 }
7129         }
7130         return 0;
7131 }
7132
7133 static int
7134 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7135 {
7136         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7137         uint64_t hena;
7138         int ret;
7139
7140         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7141                                rss_conf->rss_key_len);
7142         if (ret)
7143                 return ret;
7144
7145         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7146         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7147         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7148         I40E_WRITE_FLUSH(hw);
7149
7150         return 0;
7151 }
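
/*
 * Illustrative note (not part of the upstream source): HENA is a 64-bit
 * enable mask split across two 32-bit registers, so the write above is
 * effectively:
 *
 *     PFQF_HENA(0) = (uint32_t)(hena & 0xFFFFFFFF);   // low 32 bits
 *     PFQF_HENA(1) = (uint32_t)(hena >> 32);          // high 32 bits
 */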
7152
7153 static int
7154 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7155                          struct rte_eth_rss_conf *rss_conf)
7156 {
7157         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7158         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7159         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7160         uint64_t hena;
7161
7162         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7163         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7164
7165         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7166                 if (rss_hf != 0) /* Enable RSS */
7167                         return -EINVAL;
7168                 return 0; /* Nothing to do */
7169         }
7170         /* RSS enabled */
7171         if (rss_hf == 0) /* Disable RSS */
7172                 return -EINVAL;
7173
7174         return i40e_hw_rss_hash_set(pf, rss_conf);
7175 }
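
/*
 * Usage sketch (illustrative only): this callback is reached through the
 * generic ethdev API, e.g. from an application:
 *
 *     struct rte_eth_rss_conf rss_conf = {
 *             .rss_key = NULL,           // keep the current key
 *             .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *     };
 *     rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 *
 * Note it can't flip RSS on or off, only retune an already-enabled hash.
 */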
7176
7177 static int
7178 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7179                            struct rte_eth_rss_conf *rss_conf)
7180 {
7181         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7182         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7183         uint64_t hena;
7184
7185         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7186                          &rss_conf->rss_key_len);
7187
7188         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7189         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7190         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7191
7192         return 0;
7193 }
7194
7195 static int
7196 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7197 {
7198         switch (filter_type) {
7199         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7200                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7201                 break;
7202         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7203                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7204                 break;
7205         case RTE_TUNNEL_FILTER_IMAC_TENID:
7206                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7207                 break;
7208         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7209                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7210                 break;
7211         case ETH_TUNNEL_FILTER_IMAC:
7212                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7213                 break;
7214         case ETH_TUNNEL_FILTER_OIP:
7215                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7216                 break;
7217         case ETH_TUNNEL_FILTER_IIP:
7218                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7219                 break;
7220         default:
7221                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7222                 return -EINVAL;
7223         }
7224
7225         return 0;
7226 }
7227
7228 /* Convert tunnel filter structure */
7229 static int
7230 i40e_tunnel_filter_convert(
7231         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7232         struct i40e_tunnel_filter *tunnel_filter)
7233 {
7234         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7235                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
7236         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7237                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
7238         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7239         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7240              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7241             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7242                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7243         else
7244                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7245         tunnel_filter->input.flags = cld_filter->element.flags;
7246         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7247         tunnel_filter->queue = cld_filter->element.queue_number;
7248         rte_memcpy(tunnel_filter->input.general_fields,
7249                    cld_filter->general_fields,
7250                    sizeof(cld_filter->general_fields));
7251
7252         return 0;
7253 }
7254
7255 /* Check if there exists the tunnel filter */
7256 struct i40e_tunnel_filter *
7257 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7258                              const struct i40e_tunnel_filter_input *input)
7259 {
7260         int ret;
7261
7262         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7263         if (ret < 0)
7264                 return NULL;
7265
7266         return tunnel_rule->hash_map[ret];
7267 }
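
/*
 * Illustrative note (not part of the upstream source): rte_hash_lookup()
 * returns the key's position (>= 0) on a hit, so that position doubles as
 * the index into hash_map where the filter pointer was stored at insert
 * time; a negative return means no such filter exists.
 */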
7268
7269 /* Add a tunnel filter into the SW list */
7270 static int
7271 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7272                              struct i40e_tunnel_filter *tunnel_filter)
7273 {
7274         struct i40e_tunnel_rule *rule = &pf->tunnel;
7275         int ret;
7276
7277         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7278         if (ret < 0) {
7279                 PMD_DRV_LOG(ERR,
7280                             "Failed to insert tunnel filter into hash table %d!",
7281                             ret);
7282                 return ret;
7283         }
7284         rule->hash_map[ret] = tunnel_filter;
7285
7286         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7287
7288         return 0;
7289 }
7290
7291 /* Delete a tunnel filter from the SW list */
7292 int
7293 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7294                           struct i40e_tunnel_filter_input *input)
7295 {
7296         struct i40e_tunnel_rule *rule = &pf->tunnel;
7297         struct i40e_tunnel_filter *tunnel_filter;
7298         int ret;
7299
7300         ret = rte_hash_del_key(rule->hash_table, input);
7301         if (ret < 0) {
7302                 PMD_DRV_LOG(ERR,
7303                             "Failed to delete tunnel filter from hash table %d!",
7304                             ret);
7305                 return ret;
7306         }
7307         tunnel_filter = rule->hash_map[ret];
7308         rule->hash_map[ret] = NULL;
7309
7310         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7311         rte_free(tunnel_filter);
7312
7313         return 0;
7314 }
7315
7316 int
7317 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7318                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7319                         uint8_t add)
7320 {
7321         uint16_t ip_type;
7322         uint32_t ipv4_addr, ipv4_addr_le;
7323         uint8_t i, tun_type = 0;
7324         /* internal variable to convert ipv6 byte order */
7325         uint32_t convert_ipv6[4];
7326         int val, ret = 0;
7327         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7328         struct i40e_vsi *vsi = pf->main_vsi;
7329         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7330         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7331         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7332         struct i40e_tunnel_filter *tunnel, *node;
7333         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7334
7335         cld_filter = rte_zmalloc("tunnel_filter",
7336                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7337                          0);
7338
7339         if (cld_filter == NULL) {
7340                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7341                 return -ENOMEM;
7342         }
7343         pfilter = cld_filter;
7344
7345         ether_addr_copy(&tunnel_filter->outer_mac,
7346                         (struct ether_addr *)&pfilter->element.outer_mac);
7347         ether_addr_copy(&tunnel_filter->inner_mac,
7348                         (struct ether_addr *)&pfilter->element.inner_mac);
7349
7350         pfilter->element.inner_vlan =
7351                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7352         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7353                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7354                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7355                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7356                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7357                                 &ipv4_addr_le,
7358                                 sizeof(pfilter->element.ipaddr.v4.data));
7359         } else {
7360                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7361                 for (i = 0; i < 4; i++) {
7362                         convert_ipv6[i] =
7363                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7364                 }
7365                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7366                            &convert_ipv6,
7367                            sizeof(pfilter->element.ipaddr.v6.data));
7368         }
7369
7370         /* check tunneled type */
7371         switch (tunnel_filter->tunnel_type) {
7372         case RTE_TUNNEL_TYPE_VXLAN:
7373                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7374                 break;
7375         case RTE_TUNNEL_TYPE_NVGRE:
7376                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7377                 break;
7378         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7379                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7380                 break;
7381         default:
7382                 /* Other tunnel types is not supported. */
7383                 /* Other tunnel types are not supported. */
7384                 rte_free(cld_filter);
7385                 return -EINVAL;
7386         }
7387
7388         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7389                                        &pfilter->element.flags);
7390         if (val < 0) {
7391                 rte_free(cld_filter);
7392                 return -EINVAL;
7393         }
7394
7395         pfilter->element.flags |= rte_cpu_to_le_16(
7396                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7397                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7398         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7399         pfilter->element.queue_number =
7400                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7401
7402         /* Check if the filter already exists in the SW list */
7403         memset(&check_filter, 0, sizeof(check_filter));
7404         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7405         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7406         if (add && node) {
7407                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7408                 rte_free(cld_filter);
7409                 return -EINVAL;
7410         }
7411
7412         if (!add && !node) {
7413                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7414                 rte_free(cld_filter);
7415                 return -EINVAL;
7416         }
7417
7418         if (add) {
7419                 ret = i40e_aq_add_cloud_filters(hw,
7420                                         vsi->seid, &cld_filter->element, 1);
7421                 if (ret < 0) {
7422                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7423                         rte_free(cld_filter);
7424                         return -ENOTSUP;
7425                 }
7426                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7427                 if (tunnel == NULL) {
7428                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7429                         rte_free(cld_filter);
7430                         return -ENOMEM;
7431                 }
7432
7433                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7434                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7435                 if (ret < 0)
7436                         rte_free(tunnel);
7437         } else {
7438                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7439                                                    &cld_filter->element, 1);
7440                 if (ret < 0) {
7441                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7442                         rte_free(cld_filter);
7443                         return -ENOTSUP;
7444                 }
7445                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7446         }
7447
7448         rte_free(cld_filter);
7449         return ret;
7450 }
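
/*
 * Usage sketch (illustrative only): adding a VXLAN tunnel filter that
 * steers matching traffic to queue 4. Field values are made-up examples.
 *
 *     struct rte_eth_tunnel_filter_conf conf = {
 *             .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *             .filter_type = ETH_TUNNEL_FILTER_IMAC,
 *             .ip_type = RTE_TUNNEL_IPTYPE_IPV4,
 *             .tenant_id = 1000,
 *             .queue_id = 4,
 *     };
 *     // inner_mac would also be filled in for an IMAC match
 *     i40e_dev_tunnel_filter_set(pf, &conf, 1);   // add = 1
 */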
7451
7452 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7453 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7454 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7455 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7456 #define I40E_TR_GRE_KEY_MASK                    0x400
7457 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7458 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7459
7460 static enum i40e_status_code
7461 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7462 {
7463         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7464         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7465         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7466         enum i40e_status_code status = I40E_SUCCESS;
7467
7468         if (pf->support_multi_driver) {
7469                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7470                 return I40E_NOT_SUPPORTED;
7471         }
7472
7473         memset(&filter_replace, 0,
7474                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7475         memset(&filter_replace_buf, 0,
7476                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7477
7478         /* create L1 filter */
7479         filter_replace.old_filter_type =
7480                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7481         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7482         filter_replace.tr_bit = 0;
7483
7484         /* Prepare the buffer, 3 entries */
7485         filter_replace_buf.data[0] =
7486                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7487         filter_replace_buf.data[0] |=
7488                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7489         filter_replace_buf.data[2] = 0xFF;
7490         filter_replace_buf.data[3] = 0xFF;
7491         filter_replace_buf.data[4] =
7492                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7493         filter_replace_buf.data[4] |=
7494                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7495         filter_replace_buf.data[7] = 0xF0;
7496         filter_replace_buf.data[8] =
7497                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7498         filter_replace_buf.data[8] |=
7499                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7500         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7501                 I40E_TR_GENEVE_KEY_MASK |
7502                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7503         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7504                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7505                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7506
7507         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7508                                                &filter_replace_buf);
7509         if (!status) {
7510                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7511                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7512                             "cloud l1 type is changed from 0x%x to 0x%x",
7513                             filter_replace.old_filter_type,
7514                             filter_replace.new_filter_type);
7515         }
7516         return status;
7517 }
7518
7519 static enum i40e_status_code
7520 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7521 {
7522         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7523         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7524         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7525         enum i40e_status_code status = I40E_SUCCESS;
7526
7527         if (pf->support_multi_driver) {
7528                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7529                 return I40E_NOT_SUPPORTED;
7530         }
7531
7532         /* For MPLSoUDP */
7533         memset(&filter_replace, 0,
7534                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7535         memset(&filter_replace_buf, 0,
7536                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7537         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7538                 I40E_AQC_MIRROR_CLOUD_FILTER;
7539         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7540         filter_replace.new_filter_type =
7541                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7542         /* Prepare the buffer, 2 entries */
7543         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7544         filter_replace_buf.data[0] |=
7545                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7546         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7547         filter_replace_buf.data[4] |=
7548                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7549         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7550                                                &filter_replace_buf);
7551         if (status < 0)
7552                 return status;
7553         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7554                     "cloud filter type is changed from 0x%x to 0x%x",
7555                     filter_replace.old_filter_type,
7556                     filter_replace.new_filter_type);
7557
7558         /* For MPLSoGRE */
7559         memset(&filter_replace, 0,
7560                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7561         memset(&filter_replace_buf, 0,
7562                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7563
7564         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7565                 I40E_AQC_MIRROR_CLOUD_FILTER;
7566         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7567         filter_replace.new_filter_type =
7568                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7569         /* Prepare the buffer, 2 entries */
7570         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7571         filter_replace_buf.data[0] |=
7572                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7573         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7574         filter_replace_buf.data[4] |=
7575                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7576
7577         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7578                                                &filter_replace_buf);
7579         if (!status) {
7580                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7581                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7582                             "cloud filter type is changed from 0x%x to 0x%x",
7583                             filter_replace.old_filter_type,
7584                             filter_replace.new_filter_type);
7585         }
7586         return status;
7587 }
7588
7589 static enum i40e_status_code
7590 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7591 {
7592         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7593         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7594         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7595         enum i40e_status_code status = I40E_SUCCESS;
7596
7597         if (pf->support_multi_driver) {
7598                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7599                 return I40E_NOT_SUPPORTED;
7600         }
7601
7602         /* For GTP-C */
7603         memset(&filter_replace, 0,
7604                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7605         memset(&filter_replace_buf, 0,
7606                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7607         /* create L1 filter */
7608         filter_replace.old_filter_type =
7609                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7610         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7611         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7612                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7613         /* Prepare the buffer, 2 entries */
7614         filter_replace_buf.data[0] =
7615                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7616         filter_replace_buf.data[0] |=
7617                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7618         filter_replace_buf.data[2] = 0xFF;
7619         filter_replace_buf.data[3] = 0xFF;
7620         filter_replace_buf.data[4] =
7621                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7622         filter_replace_buf.data[4] |=
7623                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7624         filter_replace_buf.data[6] = 0xFF;
7625         filter_replace_buf.data[7] = 0xFF;
7626         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7627                                                &filter_replace_buf);
7628         if (status < 0)
7629                 return status;
7630         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7631                     "cloud l1 type is changed from 0x%x to 0x%x",
7632                     filter_replace.old_filter_type,
7633                     filter_replace.new_filter_type);
7634
7635         /* for GTP-U */
7636         memset(&filter_replace, 0,
7637                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7638         memset(&filter_replace_buf, 0,
7639                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7640         /* create L1 filter */
7641         filter_replace.old_filter_type =
7642                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7643         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7644         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7645                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7646         /* Prepare the buffer, 2 entries */
7647         filter_replace_buf.data[0] =
7648                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7649         filter_replace_buf.data[0] |=
7650                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7651         filter_replace_buf.data[2] = 0xFF;
7652         filter_replace_buf.data[3] = 0xFF;
7653         filter_replace_buf.data[4] =
7654                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7655         filter_replace_buf.data[4] |=
7656                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7657         filter_replace_buf.data[6] = 0xFF;
7658         filter_replace_buf.data[7] = 0xFF;
7659
7660         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7661                                                &filter_replace_buf);
7662         if (!status) {
7663                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7664                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7665                             "cloud l1 type is changed from 0x%x to 0x%x",
7666                             filter_replace.old_filter_type,
7667                             filter_replace.new_filter_type);
7668         }
7669         return status;
7670 }
7671
7672 static enum i40e_status_code
7673 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7674 {
7675         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7676         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7677         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7678         enum i40e_status_code status = I40E_SUCCESS;
7679
7680         if (pf->support_multi_driver) {
7681                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7682                 return I40E_NOT_SUPPORTED;
7683         }
7684
7685         /* for GTP-C */
7686         memset(&filter_replace, 0,
7687                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7688         memset(&filter_replace_buf, 0,
7689                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7690         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7691         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7692         filter_replace.new_filter_type =
7693                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7694         /* Prepare the buffer, 2 entries */
7695         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7696         filter_replace_buf.data[0] |=
7697                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7698         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7699         filter_replace_buf.data[4] |=
7700                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7701         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7702                                                &filter_replace_buf);
7703         if (status < 0)
7704                 return status;
7705         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7706                     "cloud filter type is changed from 0x%x to 0x%x",
7707                     filter_replace.old_filter_type,
7708                     filter_replace.new_filter_type);
7709
7710         /* for GTP-U */
7711         memset(&filter_replace, 0,
7712                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7713         memset(&filter_replace_buf, 0,
7714                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7715         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7716         filter_replace.old_filter_type =
7717                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7718         filter_replace.new_filter_type =
7719                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7720         /* Prepare the buffer, 2 entries */
7721         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7722         filter_replace_buf.data[0] |=
7723                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7724         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7725         filter_replace_buf.data[4] |=
7726                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7727
7728         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7729                                                &filter_replace_buf);
7730         if (!status) {
7731                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7732                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7733                             "cloud filter type is changed from 0x%x to 0x%x",
7734                             filter_replace.old_filter_type,
7735                             filter_replace.new_filter_type);
7736         }
7737         return status;
7738 }
7739
7740 int
7741 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7742                       struct i40e_tunnel_filter_conf *tunnel_filter,
7743                       uint8_t add)
7744 {
7745         uint16_t ip_type;
7746         uint32_t ipv4_addr, ipv4_addr_le;
7747         uint8_t i, tun_type = 0;
7748         /* internal variable to convert ipv6 byte order */
7749         uint32_t convert_ipv6[4];
7750         int val, ret = 0;
7751         struct i40e_pf_vf *vf = NULL;
7752         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7753         struct i40e_vsi *vsi;
7754         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7755         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7756         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7757         struct i40e_tunnel_filter *tunnel, *node;
7758         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7759         uint32_t teid_le;
7760         bool big_buffer = 0;
7761
7762         cld_filter = rte_zmalloc("tunnel_filter",
7763                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7764                          0);
7765
7766         if (cld_filter == NULL) {
7767                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7768                 return -ENOMEM;
7769         }
7770         pfilter = cld_filter;
7771
7772         ether_addr_copy(&tunnel_filter->outer_mac,
7773                         (struct ether_addr *)&pfilter->element.outer_mac);
7774         ether_addr_copy(&tunnel_filter->inner_mac,
7775                         (struct ether_addr *)&pfilter->element.inner_mac);
7776
7777         pfilter->element.inner_vlan =
7778                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7779         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7780                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7781                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7782                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7783                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7784                                 &ipv4_addr_le,
7785                                 sizeof(pfilter->element.ipaddr.v4.data));
7786         } else {
7787                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7788                 for (i = 0; i < 4; i++) {
7789                         convert_ipv6[i] =
7790                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7791                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7792                 }
7793                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7794                            &convert_ipv6,
7795                            sizeof(pfilter->element.ipaddr.v6.data));
7796         }
7797
7798         /* check tunneled type */
7799         switch (tunnel_filter->tunnel_type) {
7800         case I40E_TUNNEL_TYPE_VXLAN:
7801                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7802                 break;
7803         case I40E_TUNNEL_TYPE_NVGRE:
7804                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7805                 break;
7806         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7807                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7808                 break;
7809         case I40E_TUNNEL_TYPE_MPLSoUDP:
7810                 if (!pf->mpls_replace_flag) {
7811                         i40e_replace_mpls_l1_filter(pf);
7812                         i40e_replace_mpls_cloud_filter(pf);
7813                         pf->mpls_replace_flag = 1;
7814                 }
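                /* An assumption worth noting (editorial comment): for
                 * MPLSoUDP/MPLSoGRE, tenant_id appears to carry the
                 * 20-bit MPLS label, split across two general-field
                 * words below: the upper 16 bits go into WORD0 and the
                 * low 4 bits into the top nibble of WORD1.
                 */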
7815                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7816                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7817                         teid_le >> 4;
7818                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7819                         (teid_le & 0xF) << 12;
7820                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7821                         0x40;
7822                 big_buffer = 1;
7823                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7824                 break;
7825         case I40E_TUNNEL_TYPE_MPLSoGRE:
7826                 if (!pf->mpls_replace_flag) {
7827                         i40e_replace_mpls_l1_filter(pf);
7828                         i40e_replace_mpls_cloud_filter(pf);
7829                         pf->mpls_replace_flag = 1;
7830                 }
7831                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7832                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7833                         teid_le >> 4;
7834                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7835                         (teid_le & 0xF) << 12;
7836                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7837                         0x0;
7838                 big_buffer = 1;
7839                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7840                 break;
7841         case I40E_TUNNEL_TYPE_GTPC:
7842                 if (!pf->gtp_replace_flag) {
7843                         i40e_replace_gtp_l1_filter(pf);
7844                         i40e_replace_gtp_cloud_filter(pf);
7845                         pf->gtp_replace_flag = 1;
7846                 }
7847                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7848                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7849                         (teid_le >> 16) & 0xFFFF;
7850                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7851                         teid_le & 0xFFFF;
7852                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7853                         0x0;
7854                 big_buffer = 1;
7855                 break;
7856         case I40E_TUNNEL_TYPE_GTPU:
7857                 if (!pf->gtp_replace_flag) {
7858                         i40e_replace_gtp_l1_filter(pf);
7859                         i40e_replace_gtp_cloud_filter(pf);
7860                         pf->gtp_replace_flag = 1;
7861                 }
7862                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7863                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7864                         (teid_le >> 16) & 0xFFFF;
7865                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7866                         teid_le & 0xFFFF;
7867                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7868                         0x0;
7869                 big_buffer = 1;
7870                 break;
7871         case I40E_TUNNEL_TYPE_QINQ:
7872                 if (!pf->qinq_replace_flag) {
7873                         ret = i40e_cloud_filter_qinq_create(pf);
7874                         if (ret < 0)
7875                                 PMD_DRV_LOG(DEBUG,
7876                                             "QinQ tunnel filter already created.");
7877                         pf->qinq_replace_flag = 1;
7878                 }
7879                 /* Put the values of the outer and inner VLAN into the
7880                  * general fields.
7881                  * The big buffer must be used; see the changes in
7882                  * i40e_aq_add_cloud_filters.
7883                  */
7884                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7885                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7886                 big_buffer = 1;
7887                 break;
7888         default:
7889                 /* Other tunnel types are not supported. */
7890                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
7891                 rte_free(cld_filter);
7892                 return -EINVAL;
7893         }
7894
7895         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7896                 pfilter->element.flags =
7897                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7898         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7899                 pfilter->element.flags =
7900                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7901         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7902                 pfilter->element.flags =
7903                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7904         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7905                 pfilter->element.flags =
7906                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7907         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7908                 pfilter->element.flags |=
7909                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7910         else {
7911                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7912                                                 &pfilter->element.flags);
7913                 if (val < 0) {
7914                         rte_free(cld_filter);
7915                         return -EINVAL;
7916                 }
7917         }
7918
7919         pfilter->element.flags |= rte_cpu_to_le_16(
7920                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7921                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7922         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7923         pfilter->element.queue_number =
7924                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7925
7926         if (!tunnel_filter->is_to_vf)
7927                 vsi = pf->main_vsi;
7928         else {
7929                 if (tunnel_filter->vf_id >= pf->vf_num) {
7930                         PMD_DRV_LOG(ERR, "Invalid argument.");
7931                         rte_free(cld_filter);
7932                         return -EINVAL;
7933                 }
7934                 vf = &pf->vfs[tunnel_filter->vf_id];
7935                 vsi = vf->vsi;
7936         }
7937
7938         /* Check if the filter exists in the SW list */
7939         memset(&check_filter, 0, sizeof(check_filter));
7940         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7941         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7942         check_filter.vf_id = tunnel_filter->vf_id;
7943         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7944         if (add && node) {
7945                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7946                 rte_free(cld_filter);
7947                 return -EINVAL;
7948         }
7949
7950         if (!add && !node) {
7951                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7952                 rte_free(cld_filter);
7953                 return -EINVAL;
7954         }
7955
7956         if (add) {
7957                 if (big_buffer)
7958                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7959                                                    vsi->seid, cld_filter, 1);
7960                 else
7961                         ret = i40e_aq_add_cloud_filters(hw,
7962                                         vsi->seid, &cld_filter->element, 1);
7963                 if (ret < 0) {
7964                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7965                         rte_free(cld_filter);
7966                         return -ENOTSUP;
7967                 }
7968                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7969                 if (tunnel == NULL) {
7970                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7971                         rte_free(cld_filter);
7972                         return -ENOMEM;
7973                 }
7974
7975                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7976                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7977                 if (ret < 0)
7978                         rte_free(tunnel);
7979         } else {
7980                 if (big_buffer)
7981                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7982                                 hw, vsi->seid, cld_filter, 1);
7983                 else
7984                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7985                                                    &cld_filter->element, 1);
7986                 if (ret < 0) {
7987                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7988                         rte_free(cld_filter);
7989                         return -ENOTSUP;
7990                 }
7991                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7992         }
7993
7994         rte_free(cld_filter);
7995         return ret;
7996 }
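
/*
 * Illustrative sketch (not part of the upstream driver): one way an
 * application could reach the add path above is through the legacy
 * filter-ctrl API. All values below (port id, VNI, queue, addresses)
 * are placeholders chosen for the example.
 */
static int
example_add_vxlan_tunnel_filter(uint16_t port_id)
{
        struct rte_eth_tunnel_filter_conf conf;
        static const uint8_t imac[ETHER_ADDR_LEN] =
                {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};

        memset(&conf, 0, sizeof(conf));
        conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
        /* match inner MAC + tenant id; a zero inner MAC would be rejected */
        conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID;
        rte_memcpy(conf.inner_mac.addr_bytes, imac, ETHER_ADDR_LEN);
        conf.ip_type = RTE_TUNNEL_IPTYPE_IPV4;
        /* 192.168.0.1 in network order; the driver byte-swaps it above */
        conf.ip_addr.ipv4_addr = rte_cpu_to_be_32(0xC0A80001);
        conf.tenant_id = 100;           /* VXLAN VNI, placeholder */
        conf.queue_id = 0;              /* must be below nb_rx_queues */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
                                       RTE_ETH_FILTER_ADD, &conf);
}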
7997
7998 static int
7999 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8000 {
8001         uint8_t i;
8002
8003         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8004                 if (pf->vxlan_ports[i] == port)
8005                         return i;
8006         }
8007
8008         return -1;
8009 }
8010
8011 static int
8012 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
8013 {
8014         int  idx, ret;
8015         uint8_t filter_idx;
8016         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8017
8018         idx = i40e_get_vxlan_port_idx(pf, port);
8019
8020         /* Check if port already exists */
8021         if (idx >= 0) {
8022                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8023                 return -EINVAL;
8024         }
8025
8026         /* Now check if there is space to add the new port */
8027         idx = i40e_get_vxlan_port_idx(pf, 0);
8028         if (idx < 0) {
8029                 PMD_DRV_LOG(ERR,
8030                         "Maximum number of UDP ports reached, not adding port %d",
8031                         port);
8032                 return -ENOSPC;
8033         }
8034
8035         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
8036                                         &filter_idx, NULL);
8037         if (ret < 0) {
8038                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8039                 return -1;
8040         }
8041
8042         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
8043                          port, filter_idx);
8044
8045         /* New port: add it and mark its index in the bitmap */
8046         pf->vxlan_ports[idx] = port;
8047         pf->vxlan_bitmap |= (1 << idx);
8048
8049         if (!(pf->flags & I40E_FLAG_VXLAN))
8050                 pf->flags |= I40E_FLAG_VXLAN;
8051
8052         return 0;
8053 }
8054
8055 static int
8056 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8057 {
8058         int idx;
8059         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8060
8061         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8062                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8063                 return -EINVAL;
8064         }
8065
8066         idx = i40e_get_vxlan_port_idx(pf, port);
8067
8068         if (idx < 0) {
8069                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8070                 return -EINVAL;
8071         }
8072
8073         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8074                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8075                 return -1;
8076         }
8077
8078         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
8079                         port, idx);
8080
8081         pf->vxlan_ports[idx] = 0;
8082         pf->vxlan_bitmap &= ~(1 << idx);
8083
8084         if (!pf->vxlan_bitmap)
8085                 pf->flags &= ~I40E_FLAG_VXLAN;
8086
8087         return 0;
8088 }
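
/*
 * Illustrative sketch (not part of the upstream driver): the two helpers
 * above keep the port table and the bitmap in sync. A table value of 0
 * marks a free slot, so i40e_get_vxlan_port_idx(pf, 0) doubles as a
 * free-slot search, and an all-zero bitmap means VXLAN handling can be
 * switched off.
 */
static int
example_vxlan_slot_bookkeeping(void)
{
        uint16_t ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS] = {0};
        uint32_t bitmap = 0;
        int idx;

        /* add: find a free slot, record the port, set the matching bit */
        for (idx = 0; idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS; idx++)
                if (ports[idx] == 0)
                        break;
        if (idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS)
                return -ENOSPC;
        ports[idx] = 4789;              /* IANA-assigned VXLAN port */
        bitmap |= 1u << idx;

        /* delete: clear both sides; bitmap == 0 disables the VXLAN flag */
        ports[idx] = 0;
        bitmap &= ~(1u << idx);

        return bitmap == 0 ? 0 : -1;
}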
8089
8090 /* Add UDP tunneling port */
8091 static int
8092 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8093                              struct rte_eth_udp_tunnel *udp_tunnel)
8094 {
8095         int ret = 0;
8096         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8097
8098         if (udp_tunnel == NULL)
8099                 return -EINVAL;
8100
8101         switch (udp_tunnel->prot_type) {
8102         case RTE_TUNNEL_TYPE_VXLAN:
8103                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8104                 break;
8105
8106         case RTE_TUNNEL_TYPE_GENEVE:
8107         case RTE_TUNNEL_TYPE_TEREDO:
8108                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8109                 ret = -1;
8110                 break;
8111
8112         default:
8113                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8114                 ret = -1;
8115                 break;
8116         }
8117
8118         return ret;
8119 }
8120
8121 /* Remove UDP tunneling port */
8122 static int
8123 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8124                              struct rte_eth_udp_tunnel *udp_tunnel)
8125 {
8126         int ret = 0;
8127         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8128
8129         if (udp_tunnel == NULL)
8130                 return -EINVAL;
8131
8132         switch (udp_tunnel->prot_type) {
8133         case RTE_TUNNEL_TYPE_VXLAN:
8134                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8135                 break;
8136         case RTE_TUNNEL_TYPE_GENEVE:
8137         case RTE_TUNNEL_TYPE_TEREDO:
8138                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8139                 ret = -1;
8140                 break;
8141         default:
8142                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8143                 ret = -1;
8144                 break;
8145         }
8146
8147         return ret;
8148 }
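
/*
 * Illustrative sketch (not part of the upstream driver): applications reach
 * the two callbacks above through the generic ethdev UDP tunnel API. The
 * port id is a placeholder.
 */
static int
example_offload_vxlan_udp_port(uint16_t port_id)
{
        struct rte_eth_udp_tunnel tunnel = {
                .udp_port = 4789,               /* IANA-assigned VXLAN port */
                .prot_type = RTE_TUNNEL_TYPE_VXLAN,
        };
        int ret;

        ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
        if (ret != 0)
                return ret;

        return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}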
8149
8150 /* Calculate the maximum number of contiguous PF queues that are configured */
8151 static int
8152 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8153 {
8154         struct rte_eth_dev_data *data = pf->dev_data;
8155         int i, num;
8156         struct i40e_rx_queue *rxq;
8157
8158         num = 0;
8159         for (i = 0; i < pf->lan_nb_qps; i++) {
8160                 rxq = data->rx_queues[i];
8161                 if (rxq && rxq->q_set)
8162                         num++;
8163                 else
8164                         break;
8165         }
8166
8167         return num;
8168 }
8169
8170 /* Configure RSS */
8171 static int
8172 i40e_pf_config_rss(struct i40e_pf *pf)
8173 {
8174         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8175         struct rte_eth_rss_conf rss_conf;
8176         uint32_t i, lut = 0;
8177         uint16_t j, num;
8178
8179         /*
8180          * If both VMDQ and RSS are enabled, not all PF queues are configured.
8181          * The number of PF queues actually configured must be calculated.
8182          */
8183         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8184                 num = i40e_pf_calc_configured_queues_num(pf);
8185         else
8186                 num = pf->dev_data->nb_rx_queues;
8187
8188         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8189         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
8190                         num);
8191
8192         if (num == 0) {
8193                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8194                 return -ENOTSUP;
8195         }
8196
8197         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8198                 if (j == num)
8199                         j = 0;
8200                 lut = (lut << 8) | (j & ((0x1 <<
8201                         hw->func_caps.rss_table_entry_width) - 1));
8202                 if ((i & 3) == 3)
8203                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8204         }
8205
8206         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8207         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8208                 i40e_pf_disable_rss(pf);
8209                 return 0;
8210         }
8211         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8212                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8213                 /* Random default keys */
8214                 static uint32_t rss_key_default[] = {0x6b793944,
8215                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8216                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8217                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8218
8219                 rss_conf.rss_key = (uint8_t *)rss_key_default;
8220                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8221                                                         sizeof(uint32_t);
8222         }
8223
8224         return i40e_hw_rss_hash_set(pf, &rss_conf);
8225 }
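
/*
 * Illustrative sketch (not part of the upstream driver): how the LUT loop
 * above packs the redirection table. Each 32-bit HLUT register holds four
 * one-byte entries, filled round-robin over the configured queues; the
 * entry-width masking done by the real code is omitted here for clarity.
 */
static void
example_fill_rss_lut(uint32_t *hlut_regs, uint16_t lut_size, uint16_t num)
{
        uint32_t i, lut = 0;
        uint16_t j;

        for (i = 0, j = 0; i < lut_size; i++, j++) {
                if (j == num)
                        j = 0;                  /* wrap over the queues */
                lut = (lut << 8) | j;           /* older entries shift up */
                if ((i & 3) == 3)               /* four entries per register */
                        hlut_regs[i >> 2] = lut;
        }
        /* e.g. num == 3 gives hlut_regs[0] == 0x00010200 (entries 0,1,2,0) */
}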
8226
8227 static int
8228 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8229                                struct rte_eth_tunnel_filter_conf *filter)
8230 {
8231         if (pf == NULL || filter == NULL) {
8232                 PMD_DRV_LOG(ERR, "Invalid parameter");
8233                 return -EINVAL;
8234         }
8235
8236         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8237                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8238                 return -EINVAL;
8239         }
8240
8241         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8242                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8243                 return -EINVAL;
8244         }
8245
8246         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8247                 (is_zero_ether_addr(&filter->outer_mac))) {
8248                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8249                 return -EINVAL;
8250         }
8251
8252         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8253                 (is_zero_ether_addr(&filter->inner_mac))) {
8254                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8255                 return -EINVAL;
8256         }
8257
8258         return 0;
8259 }
8260
8261 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8262 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8263 static int
8264 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8265 {
8266         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8267         uint32_t val, reg;
8268         int ret = -EINVAL;
8269
8270         if (pf->support_multi_driver) {
8271                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8272                 return -ENOTSUP;
8273         }
8274
8275         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8276         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8277
8278         if (len == 3) {
8279                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8280         } else if (len == 4) {
8281                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8282         } else {
8283                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8284                 return ret;
8285         }
8286
8287         if (reg != val) {
8288                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8289                                                    reg, NULL);
8290                 if (ret != 0)
8291                         return ret;
8292                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8293                             "to value 0x%08x",
8294                             I40E_GL_PRS_FVBM(2), reg);
8295                 i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
8296         } else {
8297                 ret = 0;
8298         }
8299         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8300                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8301
8302         return ret;
8303 }
8304
8305 static int
8306 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8307 {
8308         int ret = -EINVAL;
8309
8310         if (!hw || !cfg)
8311                 return -EINVAL;
8312
8313         switch (cfg->cfg_type) {
8314         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8315                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8316                 break;
8317         default:
8318                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8319                 break;
8320         }
8321
8322         return ret;
8323 }
8324
8325 static int
8326 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8327                                enum rte_filter_op filter_op,
8328                                void *arg)
8329 {
8330         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8331         int ret = I40E_ERR_PARAM;
8332
8333         switch (filter_op) {
8334         case RTE_ETH_FILTER_SET:
8335                 ret = i40e_dev_global_config_set(hw,
8336                         (struct rte_eth_global_cfg *)arg);
8337                 break;
8338         default:
8339                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8340                 break;
8341         }
8342
8343         return ret;
8344 }
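
/*
 * Illustrative sketch (not part of the upstream driver): a testpmd-style
 * invocation of the global-config path above, selecting a 3-byte GRE key.
 * The port id is a placeholder; the call fails with -ENOTSUP when
 * multi-driver support is enabled.
 */
static int
example_set_gre_key_len(uint16_t port_id, uint8_t len)
{
        struct rte_eth_global_cfg cfg;

        memset(&cfg, 0, sizeof(cfg));
        cfg.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN;
        cfg.cfg.gre_key_len = len;      /* only 3 or 4 is accepted */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
                                       RTE_ETH_FILTER_SET, &cfg);
}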
8345
8346 static int
8347 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8348                           enum rte_filter_op filter_op,
8349                           void *arg)
8350 {
8351         struct rte_eth_tunnel_filter_conf *filter;
8352         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8353         int ret = I40E_SUCCESS;
8354
8355         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8356
8357         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8358                 return I40E_ERR_PARAM;
8359
8360         switch (filter_op) {
8361         case RTE_ETH_FILTER_NOP:
8362                 if (!(pf->flags & I40E_FLAG_VXLAN))
8363                         ret = I40E_NOT_SUPPORTED;
8364                 break;
8365         case RTE_ETH_FILTER_ADD:
8366                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8367                 break;
8368         case RTE_ETH_FILTER_DELETE:
8369                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8370                 break;
8371         default:
8372                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8373                 ret = I40E_ERR_PARAM;
8374                 break;
8375         }
8376
8377         return ret;
8378 }
8379
8380 static int
8381 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8382 {
8383         int ret = 0;
8384         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8385
8386         /* RSS setup */
8387         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8388                 ret = i40e_pf_config_rss(pf);
8389         else
8390                 i40e_pf_disable_rss(pf);
8391
8392         return ret;
8393 }
8394
8395 /* Get the symmetric hash enable configurations per port */
8396 static void
8397 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8398 {
8399         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8400
8401         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8402 }
8403
8404 /* Set the symmetric hash enable configurations per port */
8405 static void
8406 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8407 {
8408         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8409
8410         if (enable > 0) {
8411                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8412                         PMD_DRV_LOG(INFO,
8413                                 "Symmetric hash has already been enabled");
8414                         return;
8415                 }
8416                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8417         } else {
8418                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8419                         PMD_DRV_LOG(INFO,
8420                                 "Symmetric hash has already been disabled");
8421                         return;
8422                 }
8423                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8424         }
8425         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8426         I40E_WRITE_FLUSH(hw);
8427 }
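
/*
 * Illustrative sketch (not part of the upstream driver): per-port symmetric
 * hashing is toggled through the hash filter-ctrl path, which ends up in
 * the setter above. The port id is a placeholder.
 */
static int
example_enable_symmetric_hash(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
        info.info.enable = 1;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}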
8428
8429 /*
8430  * Get global configurations of hash function type and symmetric hash enable
8431  * per flow type (pctype). Note that a global configuration affects all
8432  * ports on the same NIC.
8433  */
8434 static int
8435 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8436                                    struct rte_eth_hash_global_conf *g_cfg)
8437 {
8438         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8439         uint32_t reg;
8440         uint16_t i, j;
8441
8442         memset(g_cfg, 0, sizeof(*g_cfg));
8443         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8444         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8445                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8446         else
8447                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8448         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8449                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8450
8451         /*
8452          * As i40e supports fewer than 64 flow types, only the first 64 bits
8453          * need to be checked.
8454          */
8455         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8456                 g_cfg->valid_bit_mask[i] = 0ULL;
8457                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8458         }
8459
8460         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8461
8462         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8463                 if (!adapter->pctypes_tbl[i])
8464                         continue;
8465                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8466                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8467                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8468                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8469                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8470                                         g_cfg->sym_hash_enable_mask[0] |=
8471                                                                 (1ULL << i);
8472                                 }
8473                         }
8474                 }
8475         }
8476
8477         return 0;
8478 }
8479
8480 static int
8481 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8482                               const struct rte_eth_hash_global_conf *g_cfg)
8483 {
8484         uint32_t i;
8485         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8486
8487         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8488                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8489                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8490                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8491                                                 g_cfg->hash_func);
8492                 return -EINVAL;
8493         }
8494
8495         /*
8496          * As i40e supports fewer than 64 flow types, only the first 64 bits
8497          * need to be checked.
8498          */
8499         mask0 = g_cfg->valid_bit_mask[0];
8500         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8501                 if (i == 0) {
8502                         /* Check if any unsupported flow type is configured */
8503                         if ((mask0 | i40e_mask) ^ i40e_mask)
8504                                 goto mask_err;
8505                 } else {
8506                         if (g_cfg->valid_bit_mask[i])
8507                                 goto mask_err;
8508                 }
8509         }
8510
8511         return 0;
8512
8513 mask_err:
8514         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8515
8516         return -EINVAL;
8517 }
8518
8519 /*
8520  * Set global configurations of hash function type and symmetric hash enable
8521  * per flow type (pctype). Note that modifying a global configuration will
8522  * affect all ports on the same NIC.
8523  */
8524 static int
8525 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8526                                    struct rte_eth_hash_global_conf *g_cfg)
8527 {
8528         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8529         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8530         int ret;
8531         uint16_t i, j;
8532         uint32_t reg;
8533         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8534
8535         if (pf->support_multi_driver) {
8536                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8537                 return -ENOTSUP;
8538         }
8539
8540         /* Check the input parameters */
8541         ret = i40e_hash_global_config_check(adapter, g_cfg);
8542         if (ret < 0)
8543                 return ret;
8544
8545         /*
8546          * As i40e supports fewer than 64 flow types, only the first 64 bits
8547          * need to be configured.
8548          */
8549         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8550                 if (mask0 & (1UL << i)) {
8551                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8552                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8553
8554                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8555                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8556                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8557                                         i40e_write_global_rx_ctl(hw,
8558                                                           I40E_GLQF_HSYM(j),
8559                                                           reg);
8560                         }
8561                         i40e_global_cfg_warning(I40E_WARNING_HSYM);
8562                 }
8563         }
8564
8565         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8566         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8567                 /* Toeplitz */
8568                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8569                         PMD_DRV_LOG(DEBUG,
8570                                 "Hash function already set to Toeplitz");
8571                         goto out;
8572                 }
8573                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8574         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8575                 /* Simple XOR */
8576                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8577                         PMD_DRV_LOG(DEBUG,
8578                                 "Hash function already set to Simple XOR");
8579                         goto out;
8580                 }
8581                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8582         } else
8583                 /* Use the default, and keep it as it is */
8584                 goto out;
8585
8586         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8587         i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
8588
8589 out:
8590         I40E_WRITE_FLUSH(hw);
8591
8592         return 0;
8593 }
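
/*
 * Illustrative sketch (not part of the upstream driver): configuring the
 * global hash settings handled above, here enabling symmetric Toeplitz
 * hashing for the IPv4-TCP flow type only. Remember this is NIC-wide, not
 * per port; the port id is a placeholder.
 */
static int
example_set_global_sym_hash(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
        info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
        info.info.global_conf.valid_bit_mask[0] =
                1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
        info.info.global_conf.sym_hash_enable_mask[0] =
                1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}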
8594
8595 /**
8596  * Valid input sets for hash and flow director filters per PCTYPE
8597  */
8598 static uint64_t
8599 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8600                 enum rte_filter_type filter)
8601 {
8602         uint64_t valid;
8603
8604         static const uint64_t valid_hash_inset_table[] = {
8605                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8606                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8607                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8608                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8609                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8610                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8611                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8612                         I40E_INSET_FLEX_PAYLOAD,
8613                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8614                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8615                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8616                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8617                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8618                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8619                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8620                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8621                         I40E_INSET_FLEX_PAYLOAD,
8622                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8623                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8624                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8625                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8626                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8627                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8628                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8629                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8630                         I40E_INSET_FLEX_PAYLOAD,
8631                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8632                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8633                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8634                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8635                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8636                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8637                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8638                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8639                         I40E_INSET_FLEX_PAYLOAD,
8640                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8641                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8642                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8643                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8644                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8645                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8646                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8647                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8648                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8649                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8650                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8651                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8652                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8653                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8654                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8655                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8656                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8657                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8658                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8659                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8660                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8661                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8662                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8663                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8664                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8665                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8666                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8667                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8668                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8669                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8670                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8671                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8672                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8673                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8674                         I40E_INSET_FLEX_PAYLOAD,
8675                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8676                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8677                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8678                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8679                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8680                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8681                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8682                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8683                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8684                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8685                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8686                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8687                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8688                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8689                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8690                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8691                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8692                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8693                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8694                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8695                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8696                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8697                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8698                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8699                         I40E_INSET_FLEX_PAYLOAD,
8700                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8701                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8702                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8703                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8704                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8705                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8706                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8707                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8708                         I40E_INSET_FLEX_PAYLOAD,
8709                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8710                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8711                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8712                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8713                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8714                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8715                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8716                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8717                         I40E_INSET_FLEX_PAYLOAD,
8718                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8719                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8720                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8721                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8722                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8723                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8724                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8725                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8726                         I40E_INSET_FLEX_PAYLOAD,
8727                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8728                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8729                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8730                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8731                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8732                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8733                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8734                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8735                         I40E_INSET_FLEX_PAYLOAD,
8736                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8737                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8738                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8739                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8740                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8741                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8742                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8743                         I40E_INSET_FLEX_PAYLOAD,
8744                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8745                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8746                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8747                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8748                         I40E_INSET_FLEX_PAYLOAD,
8749         };
8750
8751         /**
8752          * Flow director supports only fields defined in
8753          * union rte_eth_fdir_flow.
8754          */
8755         static const uint64_t valid_fdir_inset_table[] = {
8756                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8757                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8758                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8759                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8760                 I40E_INSET_IPV4_TTL,
8761                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8762                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8763                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8764                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8765                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8766                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8767                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8768                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8769                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8770                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8771                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8772                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8773                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8774                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8775                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8776                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8777                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8778                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8779                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8780                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8781                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8782                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8783                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8784                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8785                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8786                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8787                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8788                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8789                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8790                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8791                 I40E_INSET_SCTP_VT,
8792                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8793                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8794                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8795                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8796                 I40E_INSET_IPV4_TTL,
8797                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8798                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8799                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8800                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8801                 I40E_INSET_IPV6_HOP_LIMIT,
8802                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8803                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8804                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8805                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8806                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8807                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8808                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8809                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8810                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8811                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8812                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8813                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8814                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8815                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8816                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8817                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8818                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8819                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8820                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8821                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8822                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8823                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8824                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8825                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8826                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8827                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8828                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8829                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8830                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8831                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8832                 I40E_INSET_SCTP_VT,
8833                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8834                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8835                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8836                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8837                 I40E_INSET_IPV6_HOP_LIMIT,
8838                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8839                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8840                 I40E_INSET_LAST_ETHER_TYPE,
8841         };
8842
8843         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8844                 return 0;
8845         if (filter == RTE_ETH_FILTER_HASH)
8846                 valid = valid_hash_inset_table[pctype];
8847         else
8848                 valid = valid_fdir_inset_table[pctype];
8849
8850         return valid;
8851 }
8852
8853 /**
8854  * Validate if the input set is allowed for a specific PCTYPE
8855  */
8856 int
8857 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8858                 enum rte_filter_type filter, uint64_t inset)
8859 {
8860         uint64_t valid;
8861
8862         valid = i40e_get_valid_input_set(pctype, filter);
8863         if (inset & (~valid))
8864                 return -EINVAL;
8865
8866         return 0;
8867 }
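
/*
 * Illustrative sketch (not part of the upstream driver): validating a
 * candidate flow-director input set for the IPv4/UDP PCTYPE against the
 * tables above. The 4-tuple below is a subset of the valid set, so the
 * call returns 0.
 */
static int
example_check_fdir_inset(void)
{
        uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;

        return i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
                                       RTE_ETH_FILTER_FDIR, inset);
}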
8868
8869 /* Default input set field combinations per PCTYPE */
8870 uint64_t
8871 i40e_get_default_input_set(uint16_t pctype)
8872 {
8873         static const uint64_t default_inset_table[] = {
8874                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8875                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8876                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8877                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8878                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8879                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8880                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8881                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8882                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8883                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8884                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8885                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8886                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8887                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8888                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8889                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8890                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8891                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8892                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8893                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8894                         I40E_INSET_SCTP_VT,
8895                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8896                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8897                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8898                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8899                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8900                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8901                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8902                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8903                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8904                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8905                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8906                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8907                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8908                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8909                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8910                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8911                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8912                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8913                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8914                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8915                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8916                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8917                         I40E_INSET_SCTP_VT,
8918                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8919                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8920                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8921                         I40E_INSET_LAST_ETHER_TYPE,
8922         };
8923
8924         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8925                 return 0;
8926
8927         return default_inset_table[pctype];
8928 }
8929
8930 /**
8931  * Parse the input set from field indexes into logical bit masks
8932  */
8933 static int
8934 i40e_parse_input_set(uint64_t *inset,
8935                      enum i40e_filter_pctype pctype,
8936                      enum rte_eth_input_set_field *field,
8937                      uint16_t size)
8938 {
8939         uint16_t i, j;
8940         int ret = -EINVAL;
8941
8942         static const struct {
8943                 enum rte_eth_input_set_field field;
8944                 uint64_t inset;
8945         } inset_convert_table[] = {
8946                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8947                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8948                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8949                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8950                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8951                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8952                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8953                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8954                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8955                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8956                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8957                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8958                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8959                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8960                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8961                         I40E_INSET_IPV6_NEXT_HDR},
8962                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8963                         I40E_INSET_IPV6_HOP_LIMIT},
8964                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8965                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8966                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8967                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8968                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8969                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8970                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8971                         I40E_INSET_SCTP_VT},
8972                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8973                         I40E_INSET_TUNNEL_DMAC},
8974                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8975                         I40E_INSET_VLAN_TUNNEL},
8976                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8977                         I40E_INSET_TUNNEL_ID},
8978                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8979                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8980                         I40E_INSET_FLEX_PAYLOAD_W1},
8981                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8982                         I40E_INSET_FLEX_PAYLOAD_W2},
8983                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8984                         I40E_INSET_FLEX_PAYLOAD_W3},
8985                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8986                         I40E_INSET_FLEX_PAYLOAD_W4},
8987                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8988                         I40E_INSET_FLEX_PAYLOAD_W5},
8989                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8990                         I40E_INSET_FLEX_PAYLOAD_W6},
8991                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8992                         I40E_INSET_FLEX_PAYLOAD_W7},
8993                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8994                         I40E_INSET_FLEX_PAYLOAD_W8},
8995         };
8996
8997         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8998                 return ret;
8999
9000         /* Only one item is allowed for the default or empty input set */
9001         if (size == 1) {
9002                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9003                         *inset = i40e_get_default_input_set(pctype);
9004                         return 0;
9005                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9006                         *inset = I40E_INSET_NONE;
9007                         return 0;
9008                 }
9009         }
9010
9011         for (i = 0, *inset = 0; i < size; i++) {
9012                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9013                         if (field[i] == inset_convert_table[j].field) {
9014                                 *inset |= inset_convert_table[j].inset;
9015                                 break;
9016                         }
9017                 }
9018
9019                 /* Contains an unsupported input set field, return immediately */
9020                 if (j == RTE_DIM(inset_convert_table))
9021                         return ret;
9022         }
9023
9024         return 0;
9025 }
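
/*
 * Illustrative sketch (not part of the upstream driver): turning an
 * application's field list into the driver's logical input-set bitmask.
 * With the two fields below, the parse yields
 * I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST.
 */
static int
example_parse_ipv4_pair(uint64_t *inset)
{
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L3_DST_IP4,
        };

        return i40e_parse_input_set(inset, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
                                    fields, RTE_DIM(fields));
}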
9026
9027 /**
9028  * Translate the input set from bit masks to register aware bit masks
9029  * Translate the input set from logical bit masks to register-aware
9030  * bit masks
9031 uint64_t
9032 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9033 {
9034         uint64_t val = 0;
9035         uint16_t i;
9036
9037         struct inset_map {
9038                 uint64_t inset;
9039                 uint64_t inset_reg;
9040         };
9041
9042         static const struct inset_map inset_map_common[] = {
9043                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9044                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9045                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9046                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9047                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9048                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9049                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9050                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9051                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9052                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9053                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9054                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9055                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9056                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9057                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9058                 {I40E_INSET_TUNNEL_DMAC,
9059                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9060                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9061                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9062                 {I40E_INSET_TUNNEL_SRC_PORT,
9063                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9064                 {I40E_INSET_TUNNEL_DST_PORT,
9065                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9066                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9067                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9068                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9069                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9070                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9071                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9072                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9073                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9074                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9075         };
9076
9077         /* Some registers map differently on X722 */
9078         static const struct inset_map inset_map_diff_x722[] = {
9079                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9080                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9081                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9082                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9083         };
9084
9085         static const struct inset_map inset_map_diff_not_x722[] = {
9086                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9087                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9088                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9089                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9090         };
9091
9092         if (input == 0)
9093                 return val;
9094
9095         /* Translate input set to register aware inset */
9096         if (type == I40E_MAC_X722) {
9097                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9098                         if (input & inset_map_diff_x722[i].inset)
9099                                 val |= inset_map_diff_x722[i].inset_reg;
9100                 }
9101         } else {
9102                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9103                         if (input & inset_map_diff_not_x722[i].inset)
9104                                 val |= inset_map_diff_not_x722[i].inset_reg;
9105                 }
9106         }
9107
9108         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9109                 if (input & inset_map_common[i].inset)
9110                         val |= inset_map_common[i].inset_reg;
9111         }
9112
9113         return val;
9114 }
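/*
 * Illustrative sketch (not part of the driver): the translated 64-bit inset
 * value is always programmed as two 32-bit halves, exactly as done in
 * i40e_filter_input_set_init() below. The register choice here is an
 * assumption for the example.
 */
static inline void
example_program_fd_inset(struct i40e_hw *hw, enum i40e_filter_pctype pctype,
                         uint64_t input_set)
{
        uint64_t inset_reg = i40e_translate_input_set_reg(hw->mac.type,
                                                          input_set);

        /* Low 32 bits go to register index 0, high 32 bits to index 1. */
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
                             (uint32_t)(inset_reg & UINT32_MAX));
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
                             (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) &
                                        UINT32_MAX));
}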
9115
9116 int
9117 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9118 {
9119         uint8_t i, idx = 0;
9120         uint64_t inset_need_mask = inset;
9121
9122         static const struct {
9123                 uint64_t inset;
9124                 uint32_t mask;
9125         } inset_mask_map[] = {
9126                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9127                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9128                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9129                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9130                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9131                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9132                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9133                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9134         };
9135
9136         if (!inset || !mask || !nb_elem)
9137                 return 0;
9138
9139         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9140                 /* Clear the inset bit if no mask is required for it,
9141                  * for example proto + ttl requested together
9142                  */
9143                 if ((inset & inset_mask_map[i].inset) ==
9144                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9145                         inset_need_mask &= ~inset_mask_map[i].inset;
9146                 if (!inset_need_mask)
9147                         return 0;
9148         }
9149         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9150                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9151                     inset_mask_map[i].inset) {
9152                         if (idx >= nb_elem) {
9153                                 PMD_DRV_LOG(ERR, "Exceeded the maximal number of bitmask registers");
9154                                 return -EINVAL;
9155                         }
9156                         mask[idx] = inset_mask_map[i].mask;
9157                         idx++;
9158                 }
9159         }
9160
9161         return idx;
9162 }
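/*
 * Illustrative sketch (not part of the driver): deriving the per-pctype
 * mask register values for an inset. A negative return means the inset
 * needs more mask slots than the caller provided.
 */
static inline int
example_generate_masks(uint64_t input_set)
{
        uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
        int num;

        num = i40e_generate_inset_mask_reg(input_set, mask_reg,
                                           I40E_INSET_MASK_NUM_REG);
        /* num is the count of valid entries in mask_reg[], or -EINVAL. */
        return num;
}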
9163
9164 void
9165 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9166 {
9167         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9168
9169         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9170         if (reg != val)
9171                 i40e_write_rx_ctl(hw, addr, val);
9172         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9173                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9174 }
9175
9176 void
9177 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9178 {
9179         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9180
9181         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9182         if (reg != val)
9183                 i40e_write_global_rx_ctl(hw, addr, val);
9184         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9185                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9186 }
9187
9188 static void
9189 i40e_filter_input_set_init(struct i40e_pf *pf)
9190 {
9191         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9192         enum i40e_filter_pctype pctype;
9193         uint64_t input_set, inset_reg;
9194         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9195         int num, i;
9196         uint16_t flow_type;
9197
9198         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9199              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9200                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9201
9202                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9203                         continue;
9204
9205                 input_set = i40e_get_default_input_set(pctype);
9206
9207                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9208                                                    I40E_INSET_MASK_NUM_REG);
9209                 if (num < 0)
9210                         return;
9211                 if (pf->support_multi_driver && num > 0) {
9212                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9213                         return;
9214                 }
9215                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9216                                         input_set);
9217
9218                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9219                                       (uint32_t)(inset_reg & UINT32_MAX));
9220                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9221                                      (uint32_t)((inset_reg >>
9222                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9223                 if (!pf->support_multi_driver) {
9224                         i40e_check_write_global_reg(hw,
9225                                             I40E_GLQF_HASH_INSET(0, pctype),
9226                                             (uint32_t)(inset_reg & UINT32_MAX));
9227                         i40e_check_write_global_reg(hw,
9228                                              I40E_GLQF_HASH_INSET(1, pctype),
9229                                              (uint32_t)((inset_reg >>
9230                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9231
9232                         for (i = 0; i < num; i++) {
9233                                 i40e_check_write_global_reg(hw,
9234                                                     I40E_GLQF_FD_MSK(i, pctype),
9235                                                     mask_reg[i]);
9236                                 i40e_check_write_global_reg(hw,
9237                                                   I40E_GLQF_HASH_MSK(i, pctype),
9238                                                   mask_reg[i]);
9239                         }
9240                         /* Clear unused mask registers of the pctype */
9241                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9242                                 i40e_check_write_global_reg(hw,
9243                                                     I40E_GLQF_FD_MSK(i, pctype),
9244                                                     0);
9245                                 i40e_check_write_global_reg(hw,
9246                                                   I40E_GLQF_HASH_MSK(i, pctype),
9247                                                   0);
9248                         }
9249                 } else {
9250                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9251                 }
9252                 I40E_WRITE_FLUSH(hw);
9253
9254                 /* store the default input set */
9255                 if (!pf->support_multi_driver)
9256                         pf->hash_input_set[pctype] = input_set;
9257                 pf->fdir.input_set[pctype] = input_set;
9258         }
9259
9260         if (!pf->support_multi_driver) {
9261                 i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9262                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9263                 i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9264         }
9265 }
9266
9267 int
9268 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9269                          struct rte_eth_input_set_conf *conf)
9270 {
9271         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9272         enum i40e_filter_pctype pctype;
9273         uint64_t input_set, inset_reg = 0;
9274         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9275         int ret, i, num;
9276
9277         if (!conf) {
9278                 PMD_DRV_LOG(ERR, "Invalid pointer");
9279                 return -EFAULT;
9280         }
9281         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9282             conf->op != RTE_ETH_INPUT_SET_ADD) {
9283                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9284                 return -EINVAL;
9285         }
9286
9287         if (pf->support_multi_driver) {
9288                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9289                 return -ENOTSUP;
9290         }
9291
9292         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9293         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9294                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9295                 return -EINVAL;
9296         }
9297
9298         if (hw->mac.type == I40E_MAC_X722) {
9299                 /* get translated pctype value in fd pctype register */
9300                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9301                         I40E_GLQF_FD_PCTYPES((int)pctype));
9302         }
9303
9304         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9305                                    conf->inset_size);
9306         if (ret) {
9307                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9308                 return -EINVAL;
9309         }
9310
9311         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9312                 /* get inset value in register */
9313                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9314                 inset_reg <<= I40E_32_BIT_WIDTH;
9315                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9316                 input_set |= pf->hash_input_set[pctype];
9317         }
9318         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9319                                            I40E_INSET_MASK_NUM_REG);
9320         if (num < 0)
9321                 return -EINVAL;
9322
9323         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9324
9325         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9326                                     (uint32_t)(inset_reg & UINT32_MAX));
9327         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9328                                     (uint32_t)((inset_reg >>
9329                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9330         i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9331
9332         for (i = 0; i < num; i++)
9333                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9334                                             mask_reg[i]);
9335         /* Clear unused mask registers of the pctype */
9336         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9337                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9338                                             0);
9339         i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9340         I40E_WRITE_FLUSH(hw);
9341
9342         pf->hash_input_set[pctype] = input_set;
9343         return 0;
9344 }
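/*
 * Illustrative usage sketch (not part of the driver): selecting the hash
 * input set for non-fragmented IPv4/UDP so that only the IP addresses feed
 * the hash. The field list is an assumption chosen for the example.
 */
static inline int
example_hash_inset_select(struct i40e_hw *hw)
{
        struct rte_eth_input_set_conf conf = {
                .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
                .inset_size = 2,
                .field = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
                           RTE_ETH_INPUT_SET_L3_DST_IP4 },
                .op = RTE_ETH_INPUT_SET_SELECT,
        };

        /* Returns -ENOTSUP when multi-driver support is enabled. */
        return i40e_hash_filter_inset_select(hw, &conf);
}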
9345
9346 int
9347 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9348                          struct rte_eth_input_set_conf *conf)
9349 {
9350         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9351         enum i40e_filter_pctype pctype;
9352         uint64_t input_set, inset_reg = 0;
9353         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9354         int ret, i, num;
9355
9356         if (!hw || !conf) {
9357                 PMD_DRV_LOG(ERR, "Invalid pointer");
9358                 return -EFAULT;
9359         }
9360         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9361             conf->op != RTE_ETH_INPUT_SET_ADD) {
9362                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9363                 return -EINVAL;
9364         }
9365
9366         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9367
9368         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9369                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9370                 return -EINVAL;
9371         }
9372
9373         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9374                                    conf->inset_size);
9375         if (ret) {
9376                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9377                 return -EINVAL;
9378         }
9379
9380         /* get inset value in register */
9381         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9382         inset_reg <<= I40E_32_BIT_WIDTH;
9383         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9384
9385         /* The inset register for flex payload cannot be changed for FDIR;
9386          * it is done by writing I40E_PRTQF_FD_FLXINSET
9387          * in i40e_set_flex_mask_on_pctype.
9388          */
9389         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9390                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9391         else
9392                 input_set |= pf->fdir.input_set[pctype];
9393         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9394                                            I40E_INSET_MASK_NUM_REG);
9395         if (num < 0)
9396                 return -EINVAL;
9397         if (pf->support_multi_driver && num > 0) {
9398                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9399                 return -ENOTSUP;
9400         }
9401
9402         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9403
9404         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9405                               (uint32_t)(inset_reg & UINT32_MAX));
9406         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9407                              (uint32_t)((inset_reg >>
9408                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9409
9410         if (!pf->support_multi_driver) {
9411                 for (i = 0; i < num; i++)
9412                         i40e_check_write_global_reg(hw,
9413                                                     I40E_GLQF_FD_MSK(i, pctype),
9414                                                     mask_reg[i]);
9415                 /* Clear unused mask registers of the pctype */
9416                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9417                         i40e_check_write_global_reg(hw,
9418                                                     I40E_GLQF_FD_MSK(i, pctype),
9419                                                     0);
9420                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9421         } else {
9422                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9423         }
9424         I40E_WRITE_FLUSH(hw);
9425
9426         pf->fdir.input_set[pctype] = input_set;
9427         return 0;
9428 }
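/*
 * Illustrative usage sketch (not part of the driver): extending rather than
 * replacing the flow director input set with RTE_ETH_INPUT_SET_ADD, which
 * ORs the new fields into pf->fdir.input_set[pctype] as shown above.
 */
static inline int
example_fdir_inset_add_ports(struct i40e_pf *pf)
{
        struct rte_eth_input_set_conf conf = {
                .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
                .inset_size = 2,
                .field = { RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT,
                           RTE_ETH_INPUT_SET_L4_UDP_DST_PORT },
                .op = RTE_ETH_INPUT_SET_ADD,
        };

        return i40e_fdir_filter_inset_select(pf, &conf);
}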
9429
9430 static int
9431 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9432 {
9433         int ret = 0;
9434
9435         if (!hw || !info) {
9436                 PMD_DRV_LOG(ERR, "Invalid pointer");
9437                 return -EFAULT;
9438         }
9439
9440         switch (info->info_type) {
9441         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9442                 i40e_get_symmetric_hash_enable_per_port(hw,
9443                                         &(info->info.enable));
9444                 break;
9445         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9446                 ret = i40e_get_hash_filter_global_config(hw,
9447                                 &(info->info.global_conf));
9448                 break;
9449         default:
9450                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9451                                                         info->info_type);
9452                 ret = -EINVAL;
9453                 break;
9454         }
9455
9456         return ret;
9457 }
9458
9459 static int
9460 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9461 {
9462         int ret = 0;
9463
9464         if (!hw || !info) {
9465                 PMD_DRV_LOG(ERR, "Invalid pointer");
9466                 return -EFAULT;
9467         }
9468
9469         switch (info->info_type) {
9470         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9471                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9472                 break;
9473         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9474                 ret = i40e_set_hash_filter_global_config(hw,
9475                                 &(info->info.global_conf));
9476                 break;
9477         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9478                 ret = i40e_hash_filter_inset_select(hw,
9479                                                &(info->info.input_set_conf));
9480                 break;
9481
9482         default:
9483                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9484                                                         info->info_type);
9485                 ret = -EINVAL;
9486                 break;
9487         }
9488
9489         return ret;
9490 }
9491
9492 /* Operations for hash function */
9493 static int
9494 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9495                       enum rte_filter_op filter_op,
9496                       void *arg)
9497 {
9498         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9499         int ret = 0;
9500
9501         switch (filter_op) {
9502         case RTE_ETH_FILTER_NOP:
9503                 break;
9504         case RTE_ETH_FILTER_GET:
9505                 ret = i40e_hash_filter_get(hw,
9506                         (struct rte_eth_hash_filter_info *)arg);
9507                 break;
9508         case RTE_ETH_FILTER_SET:
9509                 ret = i40e_hash_filter_set(hw,
9510                         (struct rte_eth_hash_filter_info *)arg);
9511                 break;
9512         default:
9513                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9514                                                                 filter_op);
9515                 ret = -ENOTSUP;
9516                 break;
9517         }
9518
9519         return ret;
9520 }
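/*
 * Illustrative usage sketch (not part of the driver): an application
 * reaches the ops above through the legacy filter API. The port_id value
 * is an assumption for the example.
 */
static inline int
example_enable_symmetric_hash(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info = {
                .info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
                .info.enable = 1,
        };

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}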
9521
9522 /* Convert ethertype filter structure */
9523 static int
9524 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9525                               struct i40e_ethertype_filter *filter)
9526 {
9527         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9528         filter->input.ether_type = input->ether_type;
9529         filter->flags = input->flags;
9530         filter->queue = input->queue;
9531
9532         return 0;
9533 }
9534
9535 /* Check if the ethertype filter already exists */
9536 struct i40e_ethertype_filter *
9537 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9538                                 const struct i40e_ethertype_filter_input *input)
9539 {
9540         int ret;
9541
9542         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9543         if (ret < 0)
9544                 return NULL;
9545
9546         return ethertype_rule->hash_map[ret];
9547 }
9548
9549 /* Add ethertype filter in SW list */
9550 static int
9551 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9552                                 struct i40e_ethertype_filter *filter)
9553 {
9554         struct i40e_ethertype_rule *rule = &pf->ethertype;
9555         int ret;
9556
9557         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9558         if (ret < 0) {
9559                 PMD_DRV_LOG(ERR,
9560                             "Failed to insert ethertype filter"
9561                             " into hash table: %d!",
9562                             ret);
9563                 return ret;
9564         }
9565         rule->hash_map[ret] = filter;
9566
9567         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9568
9569         return 0;
9570 }
9571
9572 /* Delete ethertype filter in SW list */
9573 int
9574 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9575                              struct i40e_ethertype_filter_input *input)
9576 {
9577         struct i40e_ethertype_rule *rule = &pf->ethertype;
9578         struct i40e_ethertype_filter *filter;
9579         int ret;
9580
9581         ret = rte_hash_del_key(rule->hash_table, input);
9582         if (ret < 0) {
9583                 PMD_DRV_LOG(ERR,
9584                             "Failed to delete ethertype filter"
9585                             " from hash table: %d!",
9586                             ret);
9587                 return ret;
9588         }
9589         filter = rule->hash_map[ret];
9590         rule->hash_map[ret] = NULL;
9591
9592         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9593         rte_free(filter);
9594
9595         return 0;
9596 }
9597
9598 /*
9599  * Configure an ethertype filter, which can direct packets by filtering
9600  * on MAC address and ether_type, or on ether_type only
9601  */
9602 int
9603 i40e_ethertype_filter_set(struct i40e_pf *pf,
9604                         struct rte_eth_ethertype_filter *filter,
9605                         bool add)
9606 {
9607         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9608         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9609         struct i40e_ethertype_filter *ethertype_filter, *node;
9610         struct i40e_ethertype_filter check_filter;
9611         struct i40e_control_filter_stats stats;
9612         uint16_t flags = 0;
9613         int ret;
9614
9615         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9616                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9617                 return -EINVAL;
9618         }
9619         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9620                 filter->ether_type == ETHER_TYPE_IPv6) {
9621                 PMD_DRV_LOG(ERR,
9622                         "unsupported ether_type(0x%04x) in control packet filter.",
9623                         filter->ether_type);
9624                 return -EINVAL;
9625         }
9626         if (filter->ether_type == ETHER_TYPE_VLAN)
9627                 PMD_DRV_LOG(WARNING,
9628                         "filter vlan ether_type in first tag is not supported.");
9629
9630         /* Check if there is the filter in SW list */
9631         memset(&check_filter, 0, sizeof(check_filter));
9632         i40e_ethertype_filter_convert(filter, &check_filter);
9633         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9634                                                &check_filter.input);
9635         if (add && node) {
9636                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9637                 return -EINVAL;
9638         }
9639
9640         if (!add && !node) {
9641                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9642                 return -EINVAL;
9643         }
9644
9645         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9646                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9647         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9648                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9649         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9650
9651         memset(&stats, 0, sizeof(stats));
9652         ret = i40e_aq_add_rem_control_packet_filter(hw,
9653                         filter->mac_addr.addr_bytes,
9654                         filter->ether_type, flags,
9655                         pf->main_vsi->seid,
9656                         filter->queue, add, &stats, NULL);
9657
9658         PMD_DRV_LOG(INFO,
9659                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9660                 ret, stats.mac_etype_used, stats.etype_used,
9661                 stats.mac_etype_free, stats.etype_free);
9662         if (ret < 0)
9663                 return -ENOSYS;
9664
9665         /* Add or delete a filter in SW list */
9666         if (add) {
9667                 ethertype_filter = rte_zmalloc("ethertype_filter",
9668                                        sizeof(*ethertype_filter), 0);
9669                 if (ethertype_filter == NULL) {
9670                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9671                         return -ENOMEM;
9672                 }
9673
9674                 rte_memcpy(ethertype_filter, &check_filter,
9675                            sizeof(check_filter));
9676                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9677                 if (ret < 0)
9678                         rte_free(ethertype_filter);
9679         } else {
9680                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9681         }
9682
9683         return ret;
9684 }
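/*
 * Illustrative usage sketch (not part of the driver): an ethertype filter
 * that steers all LLDP frames (ether_type 0x88CC) to queue 0. The
 * ether_type and queue values are assumptions for the example.
 */
static inline int
example_add_lldp_filter(struct i40e_pf *pf)
{
        struct rte_eth_ethertype_filter filter = {
                .ether_type = 0x88CC, /* LLDP */
                .flags = 0,           /* match on ether_type only, no MAC */
                .queue = 0,
        };

        return i40e_ethertype_filter_set(pf, &filter, TRUE);
}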
9685
9686 /*
9687  * Handle operations for ethertype filter.
9688  */
9689 static int
9690 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9691                                 enum rte_filter_op filter_op,
9692                                 void *arg)
9693 {
9694         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9695         int ret = 0;
9696
9697         if (filter_op == RTE_ETH_FILTER_NOP)
9698                 return ret;
9699
9700         if (arg == NULL) {
9701                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9702                             filter_op);
9703                 return -EINVAL;
9704         }
9705
9706         switch (filter_op) {
9707         case RTE_ETH_FILTER_ADD:
9708                 ret = i40e_ethertype_filter_set(pf,
9709                         (struct rte_eth_ethertype_filter *)arg,
9710                         TRUE);
9711                 break;
9712         case RTE_ETH_FILTER_DELETE:
9713                 ret = i40e_ethertype_filter_set(pf,
9714                         (struct rte_eth_ethertype_filter *)arg,
9715                         FALSE);
9716                 break;
9717         default:
9718                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9719                 ret = -ENOSYS;
9720                 break;
9721         }
9722         return ret;
9723 }
9724
9725 static int
9726 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9727                      enum rte_filter_type filter_type,
9728                      enum rte_filter_op filter_op,
9729                      void *arg)
9730 {
9731         int ret = 0;
9732
9733         if (dev == NULL)
9734                 return -EINVAL;
9735
9736         switch (filter_type) {
9737         case RTE_ETH_FILTER_NONE:
9738                 /* For global configuration */
9739                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9740                 break;
9741         case RTE_ETH_FILTER_HASH:
9742                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9743                 break;
9744         case RTE_ETH_FILTER_MACVLAN:
9745                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9746                 break;
9747         case RTE_ETH_FILTER_ETHERTYPE:
9748                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9749                 break;
9750         case RTE_ETH_FILTER_TUNNEL:
9751                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9752                 break;
9753         case RTE_ETH_FILTER_FDIR:
9754                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9755                 break;
9756         case RTE_ETH_FILTER_GENERIC:
9757                 if (filter_op != RTE_ETH_FILTER_GET)
9758                         return -EINVAL;
9759                 *(const void **)arg = &i40e_flow_ops;
9760                 break;
9761         default:
9762                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9763                                                         filter_type);
9764                 ret = -EINVAL;
9765                 break;
9766         }
9767
9768         return ret;
9769 }
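/*
 * Illustrative sketch (not part of the driver): RTE_ETH_FILTER_GENERIC is
 * how the rte_flow layer obtains the driver's flow ops; the handler above
 * only answers RTE_ETH_FILTER_GET, with a pointer to i40e_flow_ops.
 */
static inline const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
        const struct rte_flow_ops *ops = NULL;

        if (i40e_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                 RTE_ETH_FILTER_GET, &ops) < 0)
                return NULL;
        return ops;
}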
9770
9771 /*
9772  * Check and enable Extended Tag.
9773  * Enabling Extended Tag is important for 40G performance.
9774  */
9775 static void
9776 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9777 {
9778         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9779         uint32_t buf = 0;
9780         int ret;
9781
9782         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9783                                       PCI_DEV_CAP_REG);
9784         if (ret < 0) {
9785                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9786                             PCI_DEV_CAP_REG);
9787                 return;
9788         }
9789         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9790                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9791                 return;
9792         }
9793
9794         buf = 0;
9795         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9796                                       PCI_DEV_CTRL_REG);
9797         if (ret < 0) {
9798                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9799                             PCI_DEV_CTRL_REG);
9800                 return;
9801         }
9802         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9803                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9804                 return;
9805         }
9806         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9807         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9808                                        PCI_DEV_CTRL_REG);
9809         if (ret < 0) {
9810                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9811                             PCI_DEV_CTRL_REG);
9812                 return;
9813         }
9814 }
9815
9816 /*
9817  * As some registers are only reset by a global hardware reset,
9818  * hardware initialization is needed to put those registers into an
9819  * expected initial state.
9820  */
9821 static void
9822 i40e_hw_init(struct rte_eth_dev *dev)
9823 {
9824         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9825
9826         i40e_enable_extended_tag(dev);
9827
9828         /* clear the PF Queue Filter control register */
9829         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9830
9831         /* Disable symmetric hash per port */
9832         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9833 }
9834
9835 /*
9836  * For X722 it is possible to have multiple pctypes mapped to the same
9837  * flowtype; however, this function will return only the single highest
9838  * pctype index, which is not quite correct. This is a known problem of
9839  * the i40e driver and needs to be fixed later.
9840  */
9841 enum i40e_filter_pctype
9842 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9843 {
9844         int i;
9845         uint64_t pctype_mask;
9846
9847         if (flow_type < I40E_FLOW_TYPE_MAX) {
9848                 pctype_mask = adapter->pctypes_tbl[flow_type];
9849                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9850                         if (pctype_mask & (1ULL << i))
9851                                 return (enum i40e_filter_pctype)i;
9852                 }
9853         }
9854         return I40E_FILTER_PCTYPE_INVALID;
9855 }
9856
9857 uint16_t
9858 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9859                         enum i40e_filter_pctype pctype)
9860 {
9861         uint16_t flowtype;
9862         uint64_t pctype_mask = 1ULL << pctype;
9863
9864         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9865              flowtype++) {
9866                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9867                         return flowtype;
9868         }
9869
9870         return RTE_ETH_FLOW_UNKNOWN;
9871 }
9872
9873 /*
9874  * On X710, performance numbers are far below expectation on recent firmware
9875  * versions; on XL710, performance numbers are also far below expectation on
9876  * recent firmware versions if promiscuous mode is disabled, or if promiscuous
9877  * mode is enabled and the port MAC address equals the packet destination MAC
9878  * address. The fix for this issue may not be integrated into upcoming
9879  * firmware versions, so a workaround in the software driver is needed. It
9880  * modifies the initial values of 3 internal-only registers for both X710 and
9881  * XL710. Note that the values for X710 and XL710 could differ, and the
9882  * workaround can be removed once it is fixed in firmware.
9883  */
9884
9885 /* For both X710 and XL710 */
9886 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9887 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9888 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9889
9890 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9891 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9892
9893 /* For X722 */
9894 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9895 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9896
9897 /* For X710 */
9898 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9899 /* For XL710 */
9900 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9901 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9902
9903 static int
9904 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9905 {
9906         enum i40e_status_code status;
9907         struct i40e_aq_get_phy_abilities_resp phy_ab;
9908         int ret = -ENOTSUP;
9909         int retries = 0;
9910
9911         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9912                                               NULL);
9913
9914         while (status) {
9915                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9916                         status);
9917                 retries++;
9918                 rte_delay_us(100000);
9919                 if (retries < 5)
9920                         status = i40e_aq_get_phy_capabilities(hw, false,
9921                                         true, &phy_ab, NULL);
9922                 else
9923                         return ret;
9924         }
9925         return 0;
9926 }
9927
9928 static void
9929 i40e_configure_registers(struct i40e_hw *hw)
9930 {
9931         static struct {
9932                 uint32_t addr;
9933                 uint64_t val;
9934         } reg_table[] = {
9935                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9936                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9937                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9938         };
9939         uint64_t reg;
9940         uint32_t i;
9941         int ret;
9942
9943         for (i = 0; i < RTE_DIM(reg_table); i++) {
9944                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9945                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9946                                 reg_table[i].val =
9947                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9948                         else /* For X710/XL710/XXV710 */
9949                                 if (hw->aq.fw_maj_ver < 6)
9950                                         reg_table[i].val =
9951                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
9952                                 else
9953                                         reg_table[i].val =
9954                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
9955                 }
9956
9957                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9958                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9959                                 reg_table[i].val =
9960                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9961                         else /* For X710/XL710/XXV710 */
9962                                 reg_table[i].val =
9963                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9964                 }
9965
9966                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9967                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9968                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9969                                 reg_table[i].val =
9970                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9971                         else /* For X710 */
9972                                 reg_table[i].val =
9973                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9974                 }
9975
9976                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9977                                                         &reg, NULL);
9978                 if (ret < 0) {
9979                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9980                                                         reg_table[i].addr);
9981                         break;
9982                 }
9983                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9984                                                 reg_table[i].addr, reg);
9985                 if (reg == reg_table[i].val)
9986                         continue;
9987
9988                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9989                                                 reg_table[i].val, NULL);
9990                 if (ret < 0) {
9991                         PMD_DRV_LOG(ERR,
9992                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9993                                 reg_table[i].val, reg_table[i].addr);
9994                         break;
9995                 }
9996                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9997                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9998         }
9999 }
10000
10001 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
10002 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10003 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10004 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10005 static int
10006 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10007 {
10008         uint32_t reg;
10009         int ret;
10010
10011         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10012                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10013                 return -EINVAL;
10014         }
10015
10016         /* Configure for double VLAN RX stripping */
10017         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10018         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10019                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10020                 ret = i40e_aq_debug_write_register(hw,
10021                                                    I40E_VSI_TSR(vsi->vsi_id),
10022                                                    reg, NULL);
10023                 if (ret < 0) {
10024                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10025                                     vsi->vsi_id);
10026                         return I40E_ERR_CONFIG;
10027                 }
10028         }
10029
10030         /* Configure for double VLAN TX insertion */
10031         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10032         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10033                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10034                 ret = i40e_aq_debug_write_register(hw,
10035                                                    I40E_VSI_L2TAGSTXVALID(
10036                                                    vsi->vsi_id), reg, NULL);
10037                 if (ret < 0) {
10038                         PMD_DRV_LOG(ERR,
10039                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10040                                 vsi->vsi_id);
10041                         return I40E_ERR_CONFIG;
10042                 }
10043         }
10044
10045         return 0;
10046 }
10047
10048 /**
10049  * i40e_aq_add_mirror_rule
10050  * @hw: pointer to the hardware structure
10051  * @seid: VEB seid to add mirror rule to
10052  * @dst_id: destination vsi seid
10053  * @entries: Buffer which contains the entities to be mirrored
10054  * @count: number of entities contained in the buffer
10055  * @rule_id: the rule_id of the rule to be added
10056  *
10057  * Add a mirror rule for a given veb.
10058  *
10059  **/
10060 static enum i40e_status_code
10061 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10062                         uint16_t seid, uint16_t dst_id,
10063                         uint16_t rule_type, uint16_t *entries,
10064                         uint16_t count, uint16_t *rule_id)
10065 {
10066         struct i40e_aq_desc desc;
10067         struct i40e_aqc_add_delete_mirror_rule cmd;
10068         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10069                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10070                 &desc.params.raw;
10071         uint16_t buff_len;
10072         enum i40e_status_code status;
10073
10074         i40e_fill_default_direct_cmd_desc(&desc,
10075                                           i40e_aqc_opc_add_mirror_rule);
10076         memset(&cmd, 0, sizeof(cmd));
10077
10078         buff_len = sizeof(uint16_t) * count;
10079         desc.datalen = rte_cpu_to_le_16(buff_len);
10080         if (buff_len > 0)
10081                 desc.flags |= rte_cpu_to_le_16(
10082                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10083         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10084                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10085         cmd.num_entries = rte_cpu_to_le_16(count);
10086         cmd.seid = rte_cpu_to_le_16(seid);
10087         cmd.destination = rte_cpu_to_le_16(dst_id);
10088
10089         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10090         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10091         PMD_DRV_LOG(INFO,
10092                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u, mirror_rules_used = %u, mirror_rules_free = %u",
10093                 hw->aq.asq_last_status, resp->rule_id,
10094                 resp->mirror_rules_used, resp->mirror_rules_free);
10095         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10096
10097         return status;
10098 }
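/*
 * Illustrative usage sketch (not part of the driver): adding a VLAN-type
 * mirror rule whose buffer carries the VLAN IDs to mirror. The VLAN ID is
 * an assumption for the example; seids come from the caller.
 */
static inline enum i40e_status_code
example_add_vlan_mirror(struct i40e_hw *hw, uint16_t veb_seid,
                        uint16_t dst_vsi_seid, uint16_t *rule_id)
{
        uint16_t vlans[] = { 100 }; /* mirror traffic on VLAN 100 */

        return i40e_aq_add_mirror_rule(hw, veb_seid, dst_vsi_seid,
                                       I40E_AQC_MIRROR_RULE_TYPE_VLAN,
                                       vlans, RTE_DIM(vlans), rule_id);
}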
10099
10100 /**
10101  * i40e_aq_del_mirror_rule
10102  * @hw: pointer to the hardware structure
10103  * @seid: VEB seid to delete the mirror rule from
10104  * @entries: Buffer which contains the entities to be mirrored
10105  * @count: number of entities contained in the buffer
10106  * @rule_id: the rule_id of the rule to be deleted
10107  *
10108  * Delete a mirror rule for a given veb.
10109  *
10110  **/
10111 static enum i40e_status_code
10112 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10113                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10114                 uint16_t count, uint16_t rule_id)
10115 {
10116         struct i40e_aq_desc desc;
10117         struct i40e_aqc_add_delete_mirror_rule cmd;
10118         uint16_t buff_len = 0;
10119         enum i40e_status_code status;
10120         void *buff = NULL;
10121
10122         i40e_fill_default_direct_cmd_desc(&desc,
10123                                           i40e_aqc_opc_delete_mirror_rule);
10124         memset(&cmd, 0, sizeof(cmd));
10125         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10126                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10127                                                           I40E_AQ_FLAG_RD));
10128                 cmd.num_entries = count;
10129                 buff_len = sizeof(uint16_t) * count;
10130                 desc.datalen = rte_cpu_to_le_16(buff_len);
10131                 buff = (void *)entries;
10132         } else
10133                 /* rule_id is filled in the destination field when deleting a mirror rule */
10134                 cmd.destination = rte_cpu_to_le_16(rule_id);
10135
10136         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10137                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10138         cmd.seid = rte_cpu_to_le_16(seid);
10139
10140         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10141         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10142
10143         return status;
10144 }
10145
10146 /**
10147  * i40e_mirror_rule_set
10148  * @dev: pointer to the ethernet device
10149  * @mirror_conf: mirror rule info
10150  * @sw_id: mirror rule's sw_id
10151  * @on: enable/disable
10152  *
10153  * Set a mirror rule.
10154  *
10155  **/
10156 static int
10157 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10158                         struct rte_eth_mirror_conf *mirror_conf,
10159                         uint8_t sw_id, uint8_t on)
10160 {
10161         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10162         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10163         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10164         struct i40e_mirror_rule *parent = NULL;
10165         uint16_t seid, dst_seid, rule_id;
10166         uint16_t i, j = 0;
10167         int ret;
10168
10169         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10170
10171         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10172                 PMD_DRV_LOG(ERR,
10173                         "mirror rule cannot be configured without veb or vfs.");
10174                 return -ENOSYS;
10175         }
10176         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10177                 PMD_DRV_LOG(ERR, "mirror table is full.");
10178                 return -ENOSPC;
10179         }
10180         if (mirror_conf->dst_pool > pf->vf_num) {
10181                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10182                                  mirror_conf->dst_pool);
10183                 return -EINVAL;
10184         }
10185
10186         seid = pf->main_vsi->veb->seid;
10187
10188         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10189                 if (sw_id <= it->index) {
10190                         mirr_rule = it;
10191                         break;
10192                 }
10193                 parent = it;
10194         }
10195         if (mirr_rule && sw_id == mirr_rule->index) {
10196                 if (on) {
10197                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10198                         return -EEXIST;
10199                 } else {
10200                         ret = i40e_aq_del_mirror_rule(hw, seid,
10201                                         mirr_rule->rule_type,
10202                                         mirr_rule->entries,
10203                                         mirr_rule->num_entries, mirr_rule->id);
10204                         if (ret < 0) {
10205                                 PMD_DRV_LOG(ERR,
10206                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10207                                         ret, hw->aq.asq_last_status);
10208                                 return -ENOSYS;
10209                         }
10210                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10211                         rte_free(mirr_rule);
10212                         pf->nb_mirror_rule--;
10213                         return 0;
10214                 }
10215         } else if (!on) {
10216                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10217                 return -ENOENT;
10218         }
10219
10220         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10221                                 sizeof(struct i40e_mirror_rule), 0);
10222         if (!mirr_rule) {
10223                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10224                 return I40E_ERR_NO_MEMORY;
10225         }
10226         switch (mirror_conf->rule_type) {
10227         case ETH_MIRROR_VLAN:
10228                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10229                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10230                                 mirr_rule->entries[j] =
10231                                         mirror_conf->vlan.vlan_id[i];
10232                                 j++;
10233                         }
10234                 }
10235                 if (j == 0) {
10236                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10237                         rte_free(mirr_rule);
10238                         return -EINVAL;
10239                 }
10240                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10241                 break;
10242         case ETH_MIRROR_VIRTUAL_POOL_UP:
10243         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10244                 /* check if the specified pool bit is out of range */
10245                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10246                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10247                         rte_free(mirr_rule);
10248                         return -EINVAL;
10249                 }
10250                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10251                         if (mirror_conf->pool_mask & (1ULL << i)) {
10252                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10253                                 j++;
10254                         }
10255                 }
10256                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10257                         /* add pf vsi to entries */
10258                         mirr_rule->entries[j] = pf->main_vsi_seid;
10259                         j++;
10260                 }
10261                 if (j == 0) {
10262                         PMD_DRV_LOG(ERR, "pool is not specified.");
10263                         rte_free(mirr_rule);
10264                         return -EINVAL;
10265                 }
10266                 /* Egress and ingress in AQ commands mean from the switch, not the port */
10267                 mirr_rule->rule_type =
10268                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10269                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10270                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10271                 break;
10272         case ETH_MIRROR_UPLINK_PORT:
10273                 /* Egress and ingress in AQ commands mean from the switch, not the port */
10274                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10275                 break;
10276         case ETH_MIRROR_DOWNLINK_PORT:
10277                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10278                 break;
10279         default:
10280                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10281                         mirror_conf->rule_type);
10282                 rte_free(mirr_rule);
10283                 return -EINVAL;
10284         }
10285
10286         /* If the dst_pool is equal to vf_num, consider it as PF */
10287         if (mirror_conf->dst_pool == pf->vf_num)
10288                 dst_seid = pf->main_vsi_seid;
10289         else
10290                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10291
10292         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10293                                       mirr_rule->rule_type, mirr_rule->entries,
10294                                       j, &rule_id);
10295         if (ret < 0) {
10296                 PMD_DRV_LOG(ERR,
10297                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10298                         ret, hw->aq.asq_last_status);
10299                 rte_free(mirr_rule);
10300                 return -ENOSYS;
10301         }
10302
10303         mirr_rule->index = sw_id;
10304         mirr_rule->num_entries = j;
10305         mirr_rule->id = rule_id;
10306         mirr_rule->dst_vsi_seid = dst_seid;
10307
10308         if (parent)
10309                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10310         else
10311                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10312
10313         pf->nb_mirror_rule++;
10314         return 0;
10315 }
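/*
 * Illustrative usage sketch (not part of the driver): an application
 * configures the rule above through the ethdev mirroring API. Port, VLAN
 * and pool values are assumptions for the example.
 */
static inline int
example_mirror_vlan_to_vf0(uint16_t port_id)
{
        struct rte_eth_mirror_conf conf = {
                .rule_type = ETH_MIRROR_VLAN,
                .dst_pool = 0,             /* mirror into VF 0 */
                .vlan = {
                        .vlan_mask = 1ULL, /* one vlan_id entry is valid */
                        .vlan_id = { 100 },
                },
        };

        /* sw_id 0, on = 1 to enable the rule */
        return rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
}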
10316
10317 /**
10318  * i40e_mirror_rule_reset
10319  * @dev: pointer to the device
10320  * @sw_id: mirror rule's sw_id
10321  *
10322  * Reset a mirror rule.
10323  *
10324  **/
10325 static int
10326 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10327 {
10328         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10329         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10330         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10331         uint16_t seid;
10332         int ret;
10333
10334         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10335
10336         seid = pf->main_vsi->veb->seid;
10337
10338         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10339                 if (sw_id == it->index) {
10340                         mirr_rule = it;
10341                         break;
10342                 }
10343         }
10344         if (mirr_rule) {
10345                 ret = i40e_aq_del_mirror_rule(hw, seid,
10346                                 mirr_rule->rule_type,
10347                                 mirr_rule->entries,
10348                                 mirr_rule->num_entries, mirr_rule->id);
10349                 if (ret < 0) {
10350                         PMD_DRV_LOG(ERR,
10351                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10352                                 ret, hw->aq.asq_last_status);
10353                         return -ENOSYS;
10354                 }
10355                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10356                 rte_free(mirr_rule);
10357                 pf->nb_mirror_rule--;
10358         } else {
10359                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10360                 return -ENOENT;
10361         }
10362         return 0;
10363 }
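/*
 * Usage sketch (illustrative): the rule added above is removed through the
 * ethdev API by its application-level rule index, e.g.:
 *
 *         rte_eth_mirror_rule_reset(port_id, 0);
 */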
10364
10365 static uint64_t
10366 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10367 {
10368         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10369         uint64_t systim_cycles;
10370
10371         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10372         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10373                         << 32;
10374
10375         return systim_cycles;
10376 }
10377
10378 static uint64_t
10379 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10380 {
10381         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10382         uint64_t rx_tstamp;
10383
10384         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10385         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10386                         << 32;
10387
10388         return rx_tstamp;
10389 }
10390
10391 static uint64_t
10392 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10393 {
10394         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10395         uint64_t tx_tstamp;
10396
10397         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10398         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10399                         << 32;
10400
10401         return tx_tstamp;
10402 }
10403
10404 static void
10405 i40e_start_timecounters(struct rte_eth_dev *dev)
10406 {
10407         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10408         struct i40e_adapter *adapter =
10409                         (struct i40e_adapter *)dev->data->dev_private;
10410         struct rte_eth_link link;
10411         uint32_t tsync_inc_l;
10412         uint32_t tsync_inc_h;
10413
10414         /* Get current link speed. */
10415         i40e_dev_link_update(dev, 1);
10416         rte_eth_linkstatus_get(dev, &link);
10417
10418         switch (link.link_speed) {
10419         case ETH_SPEED_NUM_40G:
10420                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10421                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10422                 break;
10423         case ETH_SPEED_NUM_10G:
10424                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10425                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10426                 break;
10427         case ETH_SPEED_NUM_1G:
10428                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10429                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10430                 break;
10431         default:
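                /* Unknown or unsupported link speed: leave the increment
                 * at zero so the PHC stays stopped rather than running at
                 * a wrong rate.
                 */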
10432                 tsync_inc_l = 0x0;
10433                 tsync_inc_h = 0x0;
10434         }
10435
10436         /* Set the timesync increment value. */
10437         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10438         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10439
10440         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10441         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10442         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10443
10444         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10445         adapter->systime_tc.cc_shift = 0;
10446         adapter->systime_tc.nsec_mask = 0;
10447
10448         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10449         adapter->rx_tstamp_tc.cc_shift = 0;
10450         adapter->rx_tstamp_tc.nsec_mask = 0;
10451
10452         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10453         adapter->tx_tstamp_tc.cc_shift = 0;
10454         adapter->tx_tstamp_tc.nsec_mask = 0;
10455 }
10456
10457 static int
10458 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10459 {
10460         struct i40e_adapter *adapter =
10461                         (struct i40e_adapter *)dev->data->dev_private;
10462
10463         adapter->systime_tc.nsec += delta;
10464         adapter->rx_tstamp_tc.nsec += delta;
10465         adapter->tx_tstamp_tc.nsec += delta;
10466
10467         return 0;
10468 }
10469
10470 static int
10471 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10472 {
10473         uint64_t ns;
10474         struct i40e_adapter *adapter =
10475                         (struct i40e_adapter *)dev->data->dev_private;
10476
10477         ns = rte_timespec_to_ns(ts);
10478
10479         /* Set the timecounters to a new value. */
10480         adapter->systime_tc.nsec = ns;
10481         adapter->rx_tstamp_tc.nsec = ns;
10482         adapter->tx_tstamp_tc.nsec = ns;
10483
10484         return 0;
10485 }
10486
10487 static int
10488 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10489 {
10490         uint64_t ns, systime_cycles;
10491         struct i40e_adapter *adapter =
10492                         (struct i40e_adapter *)dev->data->dev_private;
10493
10494         systime_cycles = i40e_read_systime_cyclecounter(dev);
10495         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10496         *ts = rte_ns_to_timespec(ns);
10497
10498         return 0;
10499 }
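/*
 * Usage sketch (illustrative): the three handlers above back the generic
 * ethdev PHC control calls. An application could set, trim and read back
 * the clock as follows:
 *
 *         struct timespec ts;
 *
 *         clock_gettime(CLOCK_REALTIME, &ts);
 *         rte_eth_timesync_write_time(port_id, &ts);
 *         rte_eth_timesync_adjust_time(port_id, -1000);
 *         rte_eth_timesync_read_time(port_id, &ts);
 *
 * The -1000 steps the counters back by one microsecond; deltas are in
 * nanoseconds.
 */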
10500
10501 static int
10502 i40e_timesync_enable(struct rte_eth_dev *dev)
10503 {
10504         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10505         uint32_t tsync_ctl_l;
10506         uint32_t tsync_ctl_h;
10507
10508         /* Stop the timesync system time. */
10509         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10510         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10511         /* Reset the timesync system time value. */
10512         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10513         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10514
10515         i40e_start_timecounters(dev);
10516
10517         /* Clear timesync registers. */
10518         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10519         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10520         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10521         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10522         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10523         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10524
10525         /* Enable timestamping of PTP packets. */
10526         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10527         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10528
10529         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10530         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10531         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10532
10533         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10534         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10535
10536         return 0;
10537 }
10538
10539 static int
10540 i40e_timesync_disable(struct rte_eth_dev *dev)
10541 {
10542         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10543         uint32_t tsync_ctl_l;
10544         uint32_t tsync_ctl_h;
10545
10546         /* Disable timestamping of transmitted PTP packets. */
10547         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10548         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10549
10550         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10551         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10552
10553         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10554         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10555
10556         /* Reset the timesync increment value. */
10557         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10558         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10559
10560         return 0;
10561 }
10562
10563 static int
10564 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10565                                 struct timespec *timestamp, uint32_t flags)
10566 {
10567         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10568         struct i40e_adapter *adapter =
10569                 (struct i40e_adapter *)dev->data->dev_private;
10570
10571         uint32_t sync_status;
10572         uint32_t index = flags & 0x03;
10573         uint64_t rx_tstamp_cycles;
10574         uint64_t ns;
10575
10576         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10577         if ((sync_status & (1 << index)) == 0)
10578                 return -EINVAL;
10579
10580         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10581         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10582         *timestamp = rte_ns_to_timespec(ns);
10583
10584         return 0;
10585 }
10586
10587 static int
10588 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10589                                 struct timespec *timestamp)
10590 {
10591         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10592         struct i40e_adapter *adapter =
10593                 (struct i40e_adapter *)dev->data->dev_private;
10594
10595         uint32_t sync_status;
10596         uint64_t tx_tstamp_cycles;
10597         uint64_t ns;
10598
10599         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10600         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10601                 return -EINVAL;
10602
10603         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10604         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10605         *timestamp = rte_ns_to_timespec(ns);
10606
10607         return 0;
10608 }
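/*
 * Usage sketch (illustrative): once rte_eth_timesync_enable() has run, PTP
 * event packets are latched by the PRTTSYN logic. The RX latch index (0-3)
 * is passed in the flags argument; it is typically taken from the mbuf:
 *
 *         if (mb->ol_flags & PKT_RX_IEEE1588_TMST) {
 *                 struct timespec ts;
 *
 *                 rte_eth_timesync_read_rx_timestamp(port_id, &ts,
 *                                                    mb->timesync);
 *         }
 */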
10609
10610 /*
10611  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10612  * @dev: the device being configured
10613  * @dcb_cfg: pointer to the parsed result
10614  * @tc_map: bit map of enabled traffic classes
10615  *
10616  * Returns 0 on success, negative value on failure
10617  */
10618 static int
10619 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10620                          struct i40e_dcbx_config *dcb_cfg,
10621                          uint8_t *tc_map)
10622 {
10623         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10624         uint8_t i, tc_bw, bw_lf;
10625
10626         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10627
10628         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10629         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10630                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10631                 return -EINVAL;
10632         }
10633
10634         /* Assume each TC gets the same bandwidth share */
10635         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10636         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10637                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10638         /* Distribute the remainder so that the shares sum to 100 */
10639         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10640         for (i = 0; i < bw_lf; i++)
10641                 dcb_cfg->etscfg.tcbwtable[i]++;
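        /* e.g. with nb_tcs = 3: tc_bw = 33 and bw_lf = 1, so the table
         * becomes {34, 33, 33}, summing to exactly 100.
         */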
10642
10643         /* assume each tc has the same Transmission Selection Algorithm */
10644         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10645                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10646
10647         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10648                 dcb_cfg->etscfg.prioritytable[i] =
10649                                 dcb_rx_conf->dcb_tc[i];
10650
10651         /* FW needs one App to configure HW */
10652         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10653         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10654         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10655         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10656
10657         if (dcb_rx_conf->nb_tcs == 0)
10658                 *tc_map = 1; /* tc0 only */
10659         else
10660                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10661
10662         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10663                 dcb_cfg->pfc.willing = 0;
10664                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10665                 dcb_cfg->pfc.pfcenable = *tc_map;
10666         }
10667         return 0;
10668 }
10669
10670
10671 static enum i40e_status_code
10672 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10673                               struct i40e_aqc_vsi_properties_data *info,
10674                               uint8_t enabled_tcmap)
10675 {
10676         enum i40e_status_code ret;
10677         int i, total_tc = 0;
10678         uint16_t qpnum_per_tc, bsf, qp_idx;
10679         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10680         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10681         uint16_t used_queues;
10682
10683         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10684         if (ret != I40E_SUCCESS)
10685                 return ret;
10686
10687         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10688                 if (enabled_tcmap & (1 << i))
10689                         total_tc++;
10690         }
10691         if (total_tc == 0)
10692                 total_tc = 1;
10693         vsi->enabled_tc = enabled_tcmap;
10694
10695         /* Different VSI types get different queue assignments */
10696         if (vsi->type == I40E_VSI_MAIN)
10697                 used_queues = dev_data->nb_rx_queues -
10698                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10699         else if (vsi->type == I40E_VSI_VMDQ2)
10700                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10701         else {
10702                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10703                 return I40E_ERR_NO_AVAILABLE_VSI;
10704         }
10705
10706         /* Number of queues per enabled TC */
10707         qpnum_per_tc = used_queues / total_tc;
10708         if (qpnum_per_tc == 0) {
10709                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10710                 return I40E_ERR_INVALID_QP_ID;
10711         }
10712         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10713                                 I40E_MAX_Q_PER_TC);
10714         bsf = rte_bsf32(qpnum_per_tc);
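        /* e.g. used_queues = 16 with two TCs enabled: qpnum_per_tc = 8 and
         * bsf = 3, so the loop below maps queues 0-7 to the first enabled
         * TC and queues 8-15 to the second.
         */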
10715
10716         /**
10717          * Configure TC and queue mapping parameters. Each enabled TC is
10718          * allocated qpnum_per_tc queues; disabled TCs are served by the
10719          * default queue.
10720          */
10721         qp_idx = 0;
10722         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10723                 if (vsi->enabled_tc & (1 << i)) {
10724                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10725                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10726                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10727                         qp_idx += qpnum_per_tc;
10728                 } else
10729                         info->tc_mapping[i] = 0;
10730         }
10731
10732         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10733         if (vsi->type == I40E_VSI_SRIOV) {
10734                 info->mapping_flags |=
10735                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10736                 for (i = 0; i < vsi->nb_qps; i++)
10737                         info->queue_mapping[i] =
10738                                 rte_cpu_to_le_16(vsi->base_queue + i);
10739         } else {
10740                 info->mapping_flags |=
10741                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10742                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10743         }
10744         info->valid_sections |=
10745                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10746
10747         return I40E_SUCCESS;
10748 }
10749
10750 /*
10751  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10752  * @veb: VEB to be configured
10753  * @tc_map: enabled TC bitmap
10754  *
10755  * Returns 0 on success, negative value on failure
10756  */
10757 static enum i40e_status_code
10758 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10759 {
10760         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10761         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10762         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10763         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10764         enum i40e_status_code ret = I40E_SUCCESS;
10765         int i;
10766         uint32_t bw_max;
10767
10768         /* Nothing to do if the requested TC map matches the current one */
10769         if (veb->enabled_tc == tc_map)
10770                 return ret;
10771
10772         /* configure tc bandwidth */
10773         memset(&veb_bw, 0, sizeof(veb_bw));
10774         veb_bw.tc_valid_bits = tc_map;
10775         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10776         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10777                 if (tc_map & BIT_ULL(i))
10778                         veb_bw.tc_bw_share_credits[i] = 1;
10779         }
10780         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10781                                                    &veb_bw, NULL);
10782         if (ret) {
10783                 PMD_INIT_LOG(ERR,
10784                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10785                         hw->aq.asq_last_status);
10786                 return ret;
10787         }
10788
10789         memset(&ets_query, 0, sizeof(ets_query));
10790         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10791                                                    &ets_query, NULL);
10792         if (ret != I40E_SUCCESS) {
10793                 PMD_DRV_LOG(ERR,
10794                         "Failed to get switch_comp ETS configuration %u",
10795                         hw->aq.asq_last_status);
10796                 return ret;
10797         }
10798         memset(&bw_query, 0, sizeof(bw_query));
10799         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10800                                                   &bw_query, NULL);
10801         if (ret != I40E_SUCCESS) {
10802                 PMD_DRV_LOG(ERR,
10803                         "Failed to get switch_comp bandwidth configuration %u",
10804                         hw->aq.asq_last_status);
10805                 return ret;
10806         }
10807
10808         /* store and print out BW info */
10809         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10810         veb->bw_info.bw_max = ets_query.tc_bw_max;
10811         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10812         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10813         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10814                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10815                      I40E_16_BIT_WIDTH);
10816         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10817                 veb->bw_info.bw_ets_share_credits[i] =
10818                                 bw_query.tc_bw_share_credits[i];
10819                 veb->bw_info.bw_ets_credits[i] =
10820                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10821                 /* 4 bits per TC, 4th bit is reserved */
10822                 veb->bw_info.bw_ets_max[i] =
10823                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10824                                   RTE_LEN2MASK(3, uint8_t));
10825                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10826                             veb->bw_info.bw_ets_share_credits[i]);
10827                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10828                             veb->bw_info.bw_ets_credits[i]);
10829                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10830                             veb->bw_info.bw_ets_max[i]);
10831         }
10832
10833         veb->enabled_tc = tc_map;
10834
10835         return ret;
10836 }
10837
10838
10839 /*
10840  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10841  * @vsi: VSI to be configured
10842  * @tc_map: enabled TC bitmap
10843  *
10844  * Returns 0 on success, negative value on failure
10845  */
10846 static enum i40e_status_code
10847 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10848 {
10849         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10850         struct i40e_vsi_context ctxt;
10851         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10852         enum i40e_status_code ret = I40E_SUCCESS;
10853         int i;
10854
10855         /* Nothing to do if the requested TC map matches the current one */
10856         if (vsi->enabled_tc == tc_map)
10857                 return ret;
10858
10859         /* configure tc bandwidth */
10860         memset(&bw_data, 0, sizeof(bw_data));
10861         bw_data.tc_valid_bits = tc_map;
10862         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10863         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10864                 if (tc_map & BIT_ULL(i))
10865                         bw_data.tc_bw_credits[i] = 1;
10866         }
10867         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10868         if (ret) {
10869                 PMD_INIT_LOG(ERR,
10870                         "AQ command Config VSI BW allocation per TC failed = %d",
10871                         hw->aq.asq_last_status);
10872                 goto out;
10873         }
10874         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10875                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10876
10877         /* Update Queue Pairs Mapping for currently enabled UPs */
10878         ctxt.seid = vsi->seid;
10879         ctxt.pf_num = hw->pf_id;
10880         ctxt.vf_num = 0;
10881         ctxt.uplink_seid = vsi->uplink_seid;
10882         ctxt.info = vsi->info;
10883         i40e_get_cap(hw);
10884         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10885         if (ret)
10886                 goto out;
10887
10888         /* Update the VSI after updating the VSI queue-mapping information */
10889         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10890         if (ret) {
10891                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10892                         hw->aq.asq_last_status);
10893                 goto out;
10894         }
10895         /* update the local VSI info with updated queue map */
10896         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10897                                         sizeof(vsi->info.tc_mapping));
10898         rte_memcpy(&vsi->info.queue_mapping,
10899                         &ctxt.info.queue_mapping,
10900                 sizeof(vsi->info.queue_mapping));
10901         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10902         vsi->info.valid_sections = 0;
10903
10904         /* query and update current VSI BW information */
10905         ret = i40e_vsi_get_bw_config(vsi);
10906         if (ret) {
10907                 PMD_INIT_LOG(ERR,
10908                          "Failed updating vsi bw info, err %s aq_err %s",
10909                          i40e_stat_str(hw, ret),
10910                          i40e_aq_str(hw, hw->aq.asq_last_status));
10911                 goto out;
10912         }
10913
10914         vsi->enabled_tc = tc_map;
10915
10916 out:
10917         return ret;
10918 }
10919
10920 /*
10921  * i40e_dcb_hw_configure - program the dcb setting to hw
10922  * @pf: pf the configuration is taken on
10923  * @new_cfg: new configuration
10924  * @tc_map: enabled TC bitmap
10925  *
10926  * Returns 0 on success, negative value on failure
10927  */
10928 static enum i40e_status_code
10929 i40e_dcb_hw_configure(struct i40e_pf *pf,
10930                       struct i40e_dcbx_config *new_cfg,
10931                       uint8_t tc_map)
10932 {
10933         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10934         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10935         struct i40e_vsi *main_vsi = pf->main_vsi;
10936         struct i40e_vsi_list *vsi_list;
10937         enum i40e_status_code ret;
10938         int i;
10939         uint32_t val;
10940
10941         /* Use the FW API only if FW >= v4.4 */
10942         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10943               (hw->aq.fw_maj_ver >= 5))) {
10944                 PMD_INIT_LOG(ERR,
10945                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
10946                 return I40E_ERR_FIRMWARE_API_VERSION;
10947         }
10948
10949         /* Check whether reconfiguration is needed */
10950         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10951                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
10952                 return I40E_SUCCESS;
10953         }
10954
10955         /* Copy the new config to the current config */
10956         *old_cfg = *new_cfg;
10957         old_cfg->etsrec = old_cfg->etscfg;
10958         ret = i40e_set_dcb_config(hw);
10959         if (ret) {
10960                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10961                          i40e_stat_str(hw, ret),
10962                          i40e_aq_str(hw, hw->aq.asq_last_status));
10963                 return ret;
10964         }
10965         /* set receive Arbiter to RR mode and ETS scheme by default */
10966         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10967                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10968                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10969                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10970                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10971                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10972                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10973                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10974                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10975                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10976                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10977                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10978                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10979         }
10980         /* get local mib to check whether it is configured correctly */
10981         /* IEEE mode */
10982         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10983         /* Get Local DCB Config */
10984         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10985                                      &hw->local_dcbx_config);
10986
10987         /* If a VEB has been created, update its TC mapping first */
10988         if (main_vsi->veb) {
10989                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10990                 if (ret)
10991                         PMD_INIT_LOG(WARNING,
10992                                  "Failed configuring TC for VEB seid=%d",
10993                                  main_vsi->veb->seid);
10994         }
10995         /* Update each VSI */
10996         i40e_vsi_config_tc(main_vsi, tc_map);
10997         if (main_vsi->veb) {
10998                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10999                         /* Besides the main VSI and VMDQ VSIs, enable only
11000                          * the default TC for other VSIs
11001                          */
11002                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11003                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11004                                                          tc_map);
11005                         else
11006                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11007                                                          I40E_DEFAULT_TCMAP);
11008                         if (ret)
11009                                 PMD_INIT_LOG(WARNING,
11010                                         "Failed configuring TC for VSI seid=%d",
11011                                         vsi_list->vsi->seid);
11012                         /* continue with the next VSI even on failure */
11013                 }
11014         }
11015         return I40E_SUCCESS;
11016 }
11017
11018 /*
11019  * i40e_dcb_init_configure - initial dcb config
11020  * @dev: device being configured
11021  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11022  *
11023  * Returns 0 on success, negative value on failure
11024  */
11025 int
11026 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11027 {
11028         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11029         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11030         int i, ret = 0;
11031
11032         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11033                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11034                 return -ENOTSUP;
11035         }
11036
11037         /* DCB initialization:
11038          * Update DCB configuration from the Firmware and configure
11039          * LLDP MIB change event.
11040          */
11041         if (sw_dcb == TRUE) {
11042                 ret = i40e_init_dcb(hw);
11043                 /* If the LLDP agent is stopped, i40e_init_dcb is expected
11044                  * to fail with adminq status I40E_AQ_RC_EPERM; otherwise
11045                  * it should succeed.
11046                  */
11047                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11048                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11049                         memset(&hw->local_dcbx_config, 0,
11050                                 sizeof(struct i40e_dcbx_config));
11051                         /* set dcb default configuration */
11052                         hw->local_dcbx_config.etscfg.willing = 0;
11053                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11054                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11055                         hw->local_dcbx_config.etscfg.tsatable[0] =
11056                                                 I40E_IEEE_TSA_ETS;
11057                         /* all UPs mapping to TC0 */
11058                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11059                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11060                         hw->local_dcbx_config.etsrec =
11061                                 hw->local_dcbx_config.etscfg;
11062                         hw->local_dcbx_config.pfc.willing = 0;
11063                         hw->local_dcbx_config.pfc.pfccap =
11064                                                 I40E_MAX_TRAFFIC_CLASS;
11065                         /* FW needs one App to configure HW */
11066                         hw->local_dcbx_config.numapps = 1;
11067                         hw->local_dcbx_config.app[0].selector =
11068                                                 I40E_APP_SEL_ETHTYPE;
11069                         hw->local_dcbx_config.app[0].priority = 3;
11070                         hw->local_dcbx_config.app[0].protocolid =
11071                                                 I40E_APP_PROTOID_FCOE;
11072                         ret = i40e_set_dcb_config(hw);
11073                         if (ret) {
11074                                 PMD_INIT_LOG(ERR,
11075                                         "default DCB config failed. err = %d, aq_err = %d.",
11076                                         ret, hw->aq.asq_last_status);
11077                                 return -ENOSYS;
11078                         }
11079                 } else {
11080                         PMD_INIT_LOG(ERR,
11081                                 "DCB initialization in FW failed, err = %d, aq_err = %d.",
11082                                 ret, hw->aq.asq_last_status);
11083                         return -ENOTSUP;
11084                 }
11085         } else {
11086                 ret = i40e_aq_start_lldp(hw, NULL);
11087                 if (ret != I40E_SUCCESS)
11088                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11089
11090                 ret = i40e_init_dcb(hw);
11091                 if (!ret) {
11092                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11093                                 PMD_INIT_LOG(ERR,
11094                                         "HW doesn't support DCBX offload.");
11095                                 return -ENOTSUP;
11096                         }
11097                 } else {
11098                         PMD_INIT_LOG(ERR,
11099                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11100                                 ret, hw->aq.asq_last_status);
11101                         return -ENOTSUP;
11102                 }
11103         }
11104         return 0;
11105 }
11106
11107 /*
11108  * i40e_dcb_setup - setup dcb related config
11109  * @dev: device being configured
11110  *
11111  * Returns 0 on success, negative value on failure
11112  */
11113 static int
11114 i40e_dcb_setup(struct rte_eth_dev *dev)
11115 {
11116         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11117         struct i40e_dcbx_config dcb_cfg;
11118         uint8_t tc_map = 0;
11119         int ret = 0;
11120
11121         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11122                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11123                 return -ENOTSUP;
11124         }
11125
11126         if (pf->vf_num != 0)
11127                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
11128
11129         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11130         if (ret) {
11131                 PMD_INIT_LOG(ERR, "invalid dcb config");
11132                 return -EINVAL;
11133         }
11134         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11135         if (ret) {
11136                 PMD_INIT_LOG(ERR, "DCB hardware configuration failed");
11137                 return -ENOSYS;
11138         }
11139
11140         return 0;
11141 }
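/*
 * Usage sketch (illustrative): DCB is requested through the standard
 * ethdev configuration path rather than a driver-private call, e.g.:
 *
 *         struct rte_eth_conf conf = {
 *                 .rxmode = { .mq_mode = ETH_MQ_RX_DCB },
 *         };
 *         int i;
 *
 *         conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *         for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 *                 conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % 4;
 *         rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */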
11142
11143 static int
11144 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11145                       struct rte_eth_dcb_info *dcb_info)
11146 {
11147         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11148         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11149         struct i40e_vsi *vsi = pf->main_vsi;
11150         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11151         uint16_t bsf, tc_mapping;
11152         int i, j = 0;
11153
11154         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11155                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11156         else
11157                 dcb_info->nb_tcs = 1;
11158         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11159                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11160         for (i = 0; i < dcb_info->nb_tcs; i++)
11161                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11162
11163         /* get queue mapping if vmdq is disabled */
11164         if (!pf->nb_cfg_vmdq_vsi) {
11165                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11166                         if (!(vsi->enabled_tc & (1 << i)))
11167                                 continue;
11168                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11169                         dcb_info->tc_queue.tc_rxq[j][i].base =
11170                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11171                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11172                         dcb_info->tc_queue.tc_txq[j][i].base =
11173                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11174                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11175                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11176                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11177                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11178                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11179                 }
11180                 return 0;
11181         }
11182
11183         /* get queue mapping if vmdq is enabled */
11184         do {
11185                 vsi = pf->vmdq[j].vsi;
11186                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11187                         if (!(vsi->enabled_tc & (1 << i)))
11188                                 continue;
11189                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11190                         dcb_info->tc_queue.tc_rxq[j][i].base =
11191                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11192                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11193                         dcb_info->tc_queue.tc_txq[j][i].base =
11194                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11195                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11196                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11197                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11198                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11199                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11200                 }
11201                 j++;
11202         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11203         return 0;
11204 }
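/*
 * Usage sketch (illustrative): the mapping assembled above is what an
 * application sees through rte_eth_dev_get_dcb_info(), e.g.:
 *
 *         struct rte_eth_dcb_info info;
 *
 *         if (rte_eth_dev_get_dcb_info(port_id, &info) == 0)
 *                 printf("%u TCs, TC0 base/num: %u/%u\n", info.nb_tcs,
 *                        info.tc_queue.tc_rxq[0][0].base,
 *                        info.tc_queue.tc_rxq[0][0].nb_queue);
 */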
11205
11206 static int
11207 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11208 {
11209         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11210         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11211         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11212         uint16_t msix_intr;
11213
11214         msix_intr = intr_handle->intr_vec[queue_id];
11215         if (msix_intr == I40E_MISC_VEC_ID)
11216                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11217                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11218                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11219                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11220         else
11221                 I40E_WRITE_REG(hw,
11222                                I40E_PFINT_DYN_CTLN(msix_intr -
11223                                                    I40E_RX_VEC_START),
11224                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11225                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11226                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11227
11228         I40E_WRITE_FLUSH(hw);
11229         rte_intr_enable(&pci_dev->intr_handle);
11230
11231         return 0;
11232 }
11233
11234 static int
11235 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11236 {
11237         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11238         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11239         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11240         uint16_t msix_intr;
11241
11242         msix_intr = intr_handle->intr_vec[queue_id];
11243         if (msix_intr == I40E_MISC_VEC_ID)
11244                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11245                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11246         else
11247                 I40E_WRITE_REG(hw,
11248                                I40E_PFINT_DYN_CTLN(msix_intr -
11249                                                    I40E_RX_VEC_START),
11250                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11251         I40E_WRITE_FLUSH(hw);
11252
11253         return 0;
11254 }
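/*
 * Usage sketch (illustrative): a polling thread can arm the queue vector,
 * sleep in epoll and disarm once woken, along these lines:
 *
 *         struct rte_epoll_event ev;
 *
 *         rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *                                   RTE_EPOLL_PER_THREAD,
 *                                   RTE_INTR_EVENT_ADD, NULL);
 *         rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *         rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *         rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *
 * After waking, the queue is polled until empty before re-arming.
 */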
11255
11256 static int i40e_get_regs(struct rte_eth_dev *dev,
11257                          struct rte_dev_reg_info *regs)
11258 {
11259         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11260         uint32_t *ptr_data = regs->data;
11261         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11262         const struct i40e_reg_info *reg_info;
11263
11264         if (ptr_data == NULL) {
11265                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11266                 regs->width = sizeof(uint32_t);
11267                 return 0;
11268         }
11269
11270         /* The first few registers have to be read using AQ operations */
11271         reg_idx = 0;
11272         while (i40e_regs_adminq[reg_idx].name) {
11273                 reg_info = &i40e_regs_adminq[reg_idx++];
11274                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11275                         for (arr_idx2 = 0;
11276                                         arr_idx2 <= reg_info->count2;
11277                                         arr_idx2++) {
11278                                 reg_offset = arr_idx * reg_info->stride1 +
11279                                         arr_idx2 * reg_info->stride2;
11280                                 reg_offset += reg_info->base_addr;
11281                                 ptr_data[reg_offset >> 2] =
11282                                         i40e_read_rx_ctl(hw, reg_offset);
11283                         }
11284         }
11285
11286         /* The remaining registers can be read using primitives */
11287         reg_idx = 0;
11288         while (i40e_regs_others[reg_idx].name) {
11289                 reg_info = &i40e_regs_others[reg_idx++];
11290                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11291                         for (arr_idx2 = 0;
11292                                         arr_idx2 <= reg_info->count2;
11293                                         arr_idx2++) {
11294                                 reg_offset = arr_idx * reg_info->stride1 +
11295                                         arr_idx2 * reg_info->stride2;
11296                                 reg_offset += reg_info->base_addr;
11297                                 ptr_data[reg_offset >> 2] =
11298                                         I40E_READ_REG(hw, reg_offset);
11299                         }
11300         }
11301
11302         return 0;
11303 }
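/*
 * Usage sketch (illustrative): rte_eth_dev_get_reg_info() uses the
 * two-call pattern handled above; the first call with data == NULL only
 * reports the snapshot size:
 *
 *         struct rte_dev_reg_info regs = { .data = NULL };
 *
 *         rte_eth_dev_get_reg_info(port_id, &regs);
 *         regs.data = calloc(regs.length, regs.width);
 *         rte_eth_dev_get_reg_info(port_id, &regs);
 */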
11304
11305 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11306 {
11307         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11308
11309         /* Convert word count to byte count */
11310         return hw->nvm.sr_size << 1;
11311 }
11312
11313 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11314                            struct rte_dev_eeprom_info *eeprom)
11315 {
11316         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11317         uint16_t *data = eeprom->data;
11318         uint16_t offset, length, cnt_words;
11319         int ret_code;
11320
11321         offset = eeprom->offset >> 1;
11322         length = eeprom->length >> 1;
11323         cnt_words = length;
11324
11325         if (offset > hw->nvm.sr_size ||
11326                 offset + length > hw->nvm.sr_size) {
11327                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11328                 return -EINVAL;
11329         }
11330
11331         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11332
11333         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11334         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11335                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11336                 return -EIO;
11337         }
11338
11339         return 0;
11340 }
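/*
 * Usage sketch (illustrative): the NVM words read above are reached via
 * the generic EEPROM API; offset and length are given in bytes and the
 * driver converts them to 16-bit NVM words:
 *
 *         uint16_t buf[64];
 *         struct rte_dev_eeprom_info ee = {
 *                 .offset = 0,
 *                 .length = sizeof(buf),
 *                 .data = buf,
 *         };
 *
 *         if (rte_eth_dev_get_eeprom(port_id, &ee) == 0)
 *                 printf("NVM word 0: 0x%04x\n", buf[0]);
 */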
11341
11342 static int i40e_get_module_info(struct rte_eth_dev *dev,
11343                                 struct rte_eth_dev_module_info *modinfo)
11344 {
11345         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11346         uint32_t sff8472_comp = 0;
11347         uint32_t sff8472_swap = 0;
11348         uint32_t sff8636_rev = 0;
11349         i40e_status status;
11350         uint32_t type = 0;
11351
11352         /* Check if firmware supports reading module EEPROM. */
11353         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11354                 PMD_DRV_LOG(ERR,
11355                             "Module EEPROM memory read not supported. "
11356                             "Please update the NVM image.");
11357                 return -EINVAL;
11358         }
11359
11360         status = i40e_update_link_info(hw);
11361         if (status)
11362                 return -EIO;
11363
11364         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11365                 PMD_DRV_LOG(ERR,
11366                             "Cannot read module EEPROM memory. "
11367                             "No module connected.");
11368                 return -EINVAL;
11369         }
11370
11371         type = hw->phy.link_info.module_type[0];
11372
11373         switch (type) {
11374         case I40E_MODULE_TYPE_SFP:
11375                 status = i40e_aq_get_phy_register(hw,
11376                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11377                                 I40E_I2C_EEPROM_DEV_ADDR,
11378                                 I40E_MODULE_SFF_8472_COMP,
11379                                 &sff8472_comp, NULL);
11380                 if (status)
11381                         return -EIO;
11382
11383                 status = i40e_aq_get_phy_register(hw,
11384                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11385                                 I40E_I2C_EEPROM_DEV_ADDR,
11386                                 I40E_MODULE_SFF_8472_SWAP,
11387                                 &sff8472_swap, NULL);
11388                 if (status)
11389                         return -EIO;
11390
11391                 /* Check if the module requires address swap to access
11392                  * the other EEPROM memory page.
11393                  */
11394                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11395                         PMD_DRV_LOG(WARNING,
11396                                     "Module address swap to access "
11397                                     "page 0xA2 is not supported.");
11398                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11399                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11400                 } else if (sff8472_comp == 0x00) {
11401                         /* Module is not SFF-8472 compliant */
11402                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11403                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11404                 } else {
11405                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
11406                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11407                 }
11408                 break;
11409         case I40E_MODULE_TYPE_QSFP_PLUS:
11410                 /* Read from memory page 0. */
11411                 status = i40e_aq_get_phy_register(hw,
11412                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11413                                 0,
11414                                 I40E_MODULE_REVISION_ADDR,
11415                                 &sff8636_rev, NULL);
11416                 if (status)
11417                         return -EIO;
11418                 /* Determine revision compliance byte */
11419                 if (sff8636_rev > 0x02) {
11420                         /* Module is SFF-8636 compliant */
11421                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
11422                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11423                 } else {
11424                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
11425                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11426                 }
11427                 break;
11428         case I40E_MODULE_TYPE_QSFP28:
11429                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11430                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11431                 break;
11432         default:
11433                 PMD_DRV_LOG(ERR, "Module type unrecognized");
11434                 return -EINVAL;
11435         }
11436         return 0;
11437 }
11438
11439 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11440                                   struct rte_dev_eeprom_info *info)
11441 {
11442         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11443         bool is_sfp = false;
11444         i40e_status status;
11445         uint8_t *data;
11446         uint32_t value = 0;
11447         uint32_t i;
11448
11449         /* Validate the request before dereferencing info->data */
11450         if (!info || !info->length || !info->data)
11451                 return -EINVAL;
11452         data = info->data;
11451
11452         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11453                 is_sfp = true;
11454
11455         for (i = 0; i < info->length; i++) {
11456                 u32 offset = i + info->offset;
11457                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11458
11459                 /* Check if we need to access the other memory page */
11460                 if (is_sfp) {
11461                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11462                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11463                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11464                         }
11465                 } else {
11466                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11467                                 /* Compute memory page number and offset. */
11468                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11469                                 addr++;
11470                         }
11471                 }
11472                 status = i40e_aq_get_phy_register(hw,
11473                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11474                                 addr, offset, &value, NULL);
11475                 if (status)
11476                         return -EIO;
11477                 data[i] = (uint8_t)value;
11478         }
11479         return 0;
11480 }
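/*
 * Usage sketch (illustrative): the two handlers above implement the module
 * EEPROM API; query the type and size first, then dump the contents:
 *
 *         struct rte_eth_dev_module_info mi;
 *         struct rte_dev_eeprom_info ee;
 *         uint8_t buf[1024];
 *
 *         if (rte_eth_dev_get_module_info(port_id, &mi) == 0 &&
 *             mi.eeprom_len <= sizeof(buf)) {
 *                 ee.offset = 0;
 *                 ee.length = mi.eeprom_len;
 *                 ee.data = buf;
 *                 rte_eth_dev_get_module_eeprom(port_id, &ee);
 *         }
 */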
11481
11482 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11483                                      struct ether_addr *mac_addr)
11484 {
11485         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11486         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11487         struct i40e_vsi *vsi = pf->main_vsi;
11488         struct i40e_mac_filter_info mac_filter;
11489         struct i40e_mac_filter *f;
11490         int ret;
11491
11492         if (!is_valid_assigned_ether_addr(mac_addr)) {
11493                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11494                 return -EINVAL;
11495         }
11496
11497         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11498                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11499                         break;
11500         }
11501
11502         if (f == NULL) {
11503                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11504                 return -EIO;
11505         }
11506
11507         mac_filter = f->mac_info;
11508         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11509         if (ret != I40E_SUCCESS) {
11510                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11511                 return -EIO;
11512         }
11513         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11514         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11515         if (ret != I40E_SUCCESS) {
11516                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11517                 return -EIO;
11518         }
11519         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11520
11521         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11522                                         mac_addr->addr_bytes, NULL);
11523         if (ret != I40E_SUCCESS) {
11524                 PMD_DRV_LOG(ERR, "Failed to change mac");
11525                 return -EIO;
11526         }
11527
11528         return 0;
11529 }
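/*
 * Usage sketch (illustrative): the handler above is reached through
 * rte_eth_dev_default_mac_addr_set(); the address must be a valid unicast
 * address, e.g. a locally administered one:
 *
 *         struct ether_addr addr = {
 *                 .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *         };
 *
 *         rte_eth_dev_default_mac_addr_set(port_id, &addr);
 */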
11530
11531 static int
11532 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11533 {
11534         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11535         struct rte_eth_dev_data *dev_data = pf->dev_data;
11536         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11537         int ret = 0;
11538
11539         /* check if mtu is within the allowed range */
11540         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11541                 return -EINVAL;
11542
11543         /* MTU setting is forbidden while the port is started */
11544         if (dev_data->dev_started) {
11545                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11546                             dev_data->port_id);
11547                 return -EBUSY;
11548         }
11549
11550         if (frame_size > ETHER_MAX_LEN)
11551                 dev_data->dev_conf.rxmode.offloads |=
11552                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11553         else
11554                 dev_data->dev_conf.rxmode.offloads &=
11555                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11556
11557         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11558
11559         return ret;
11560 }
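/*
 * Usage sketch (illustrative): the frame size checked above is
 * mtu + I40E_ETH_OVERHEAD (Ethernet header, CRC and room for two VLAN
 * tags), and the port must be stopped first:
 *
 *         rte_eth_dev_stop(port_id);
 *         if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *                 printf("MTU change rejected\n");
 *         rte_eth_dev_start(port_id);
 */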
11561
11562 /* Restore ethertype filter */
11563 static void
11564 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11565 {
11566         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11567         struct i40e_ethertype_filter_list
11568                 *ethertype_list = &pf->ethertype.ethertype_list;
11569         struct i40e_ethertype_filter *f;
11570         struct i40e_control_filter_stats stats;
11571         uint16_t flags;
11572
11573         TAILQ_FOREACH(f, ethertype_list, rules) {
11574                 flags = 0;
11575                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11576                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11577                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11578                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11579                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11580
11581                 memset(&stats, 0, sizeof(stats));
11582                 i40e_aq_add_rem_control_packet_filter(hw,
11583                                             f->input.mac_addr.addr_bytes,
11584                                             f->input.ether_type,
11585                                             flags, pf->main_vsi->seid,
11586                                             f->queue, 1, &stats, NULL);
11587         }
11588         PMD_DRV_LOG(INFO, "Ethertype filter:"
11589                     " mac_etype_used = %u, etype_used = %u,"
11590                     " mac_etype_free = %u, etype_free = %u",
11591                     stats.mac_etype_used, stats.etype_used,
11592                     stats.mac_etype_free, stats.etype_free);
11593 }
11594
11595 /* Restore tunnel filter */
11596 static void
11597 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11598 {
11599         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11600         struct i40e_vsi *vsi;
11601         struct i40e_pf_vf *vf;
11602         struct i40e_tunnel_filter_list
11603                 *tunnel_list = &pf->tunnel.tunnel_list;
11604         struct i40e_tunnel_filter *f;
11605         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11606         bool big_buffer = 0;
11607
11608         TAILQ_FOREACH(f, tunnel_list, rules) {
11609                 if (!f->is_to_vf) {
11610                         vsi = pf->main_vsi;
11611                 } else {
11612                         vf = &pf->vfs[f->vf_id];
11613                         vsi = vf->vsi;
11614                 }
11615                 memset(&cld_filter, 0, sizeof(cld_filter));
11616                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11617                         (struct ether_addr *)&cld_filter.element.outer_mac);
11618                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11619                         (struct ether_addr *)&cld_filter.element.inner_mac);
11620                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11621                 cld_filter.element.flags = f->input.flags;
11622                 cld_filter.element.tenant_id = f->input.tenant_id;
11623                 cld_filter.element.queue_number = f->queue;
11624                 rte_memcpy(cld_filter.general_fields,
11625                            f->input.general_fields,
11626                            sizeof(f->input.general_fields));
11627
11628                 /* Recompute per filter: types 0x10, 0x11 and 0x12 need
11629                  * the big-buffer variant of the add command.
11630                  */
11631                 big_buffer =
11632                         ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11633                          I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11634                         ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11635                          I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11636                         ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11637                          I40E_AQC_ADD_CLOUD_FILTER_0X10);
11638
11639                 if (big_buffer)
11640                         i40e_aq_add_cloud_filters_big_buffer(hw,
11641                                              vsi->seid, &cld_filter, 1);
11642                 else
11643                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11644                                                   &cld_filter.element, 1);
11645         }
11646 }
11647
11648 /* Restore rss filter */
11649 static inline void
11650 i40e_rss_filter_restore(struct i40e_pf *pf)
11651 {
11652         struct i40e_rte_flow_rss_conf *conf = &pf->rss_info;
11653
11654         if (conf->conf.queue_num)
11655                 i40e_config_rss_filter(pf, conf, TRUE);
11656 }
11657
11658 static void
11659 i40e_filter_restore(struct i40e_pf *pf)
11660 {
11661         i40e_ethertype_filter_restore(pf);
11662         i40e_tunnel_filter_restore(pf);
11663         i40e_fdir_filter_restore(pf);
11664         i40e_rss_filter_restore(pf);
11665 }
11666
11667 static bool
11668 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11669 {
11670         if (strcmp(dev->device->driver->name, drv->driver.name))
11671                 return false;
11672
11673         return true;
11674 }
11675
11676 bool
11677 is_i40e_supported(struct rte_eth_dev *dev)
11678 {
11679         return is_device_supported(dev, &rte_i40e_pmd);
11680 }
11681
11682 struct i40e_customized_pctype*
11683 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11684 {
11685         int i;
11686
11687         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11688                 if (pf->customized_pctype[i].index == index)
11689                         return &pf->customized_pctype[i];
11690         }
11691         return NULL;
11692 }
11693
11694 static int
11695 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11696                               uint32_t pkg_size, uint32_t proto_num,
11697                               struct rte_pmd_i40e_proto_info *proto,
11698                               enum rte_pmd_i40e_package_op op)
11699 {
11700         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11701         uint32_t pctype_num;
11702         struct rte_pmd_i40e_ptype_info *pctype;
11703         uint32_t buff_size;
11704         struct i40e_customized_pctype *new_pctype = NULL;
11705         uint8_t proto_id;
11706         uint8_t pctype_value;
11707         char name[64];
11708         uint32_t i, j, n;
11709         int ret;
11710
11711         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11712             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11713                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11714                 return -1;
11715         }
11716
11717         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11718                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11719                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11720         if (ret) {
11721                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11722                 return -1;
11723         }
11724         if (!pctype_num) {
11725                 PMD_DRV_LOG(INFO, "No new pctype added");
11726                 return -1;
11727         }
11728
11729         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11730         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11731         if (!pctype) {
11732                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11733                 return -1;
11734         }
11735         /* get information about new pctype list */
11736         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11737                                         (uint8_t *)pctype, buff_size,
11738                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11739         if (ret) {
11740                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11741                 rte_free(pctype);
11742                 return -1;
11743         }
11744
11745         /* Update customized pctype. */
11746         for (i = 0; i < pctype_num; i++) {
11747                 pctype_value = pctype[i].ptype_id;
11748                 memset(name, 0, sizeof(name));
11749                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11750                         proto_id = pctype[i].protocols[j];
11751                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11752                                 continue;
11753                         for (n = 0; n < proto_num; n++) {
11754                                 if (proto[n].proto_id != proto_id)
11755                                         continue;
11756                                 strcat(name, proto[n].name);
11757                                 strcat(name, "_");
11758                                 break;
11759                         }
11760                 }
11761                 if (name[0] != '\0')
11762                         name[strlen(name) - 1] = '\0';
11762                 if (!strcmp(name, "GTPC"))
11763                         new_pctype =
11764                                 i40e_find_customized_pctype(pf,
11765                                                       I40E_CUSTOMIZED_GTPC);
11766                 else if (!strcmp(name, "GTPU_IPV4"))
11767                         new_pctype =
11768                                 i40e_find_customized_pctype(pf,
11769                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11770                 else if (!strcmp(name, "GTPU_IPV6"))
11771                         new_pctype =
11772                                 i40e_find_customized_pctype(pf,
11773                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11774                 else if (!strcmp(name, "GTPU"))
11775                         new_pctype =
11776                                 i40e_find_customized_pctype(pf,
11777                                                       I40E_CUSTOMIZED_GTPU);
11778                 if (new_pctype) {
11779                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
11780                                 new_pctype->pctype = pctype_value;
11781                                 new_pctype->valid = true;
11782                         } else {
11783                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
11784                                 new_pctype->valid = false;
11785                         }
11786                 }
11787         }
11788
11789         rte_free(pctype);
11790         return 0;
11791 }
11792
11793 static int
11794 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11795                              uint32_t pkg_size, uint32_t proto_num,
11796                              struct rte_pmd_i40e_proto_info *proto,
11797                              enum rte_pmd_i40e_package_op op)
11798 {
11799         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11800         uint16_t port_id = dev->data->port_id;
11801         uint32_t ptype_num;
11802         struct rte_pmd_i40e_ptype_info *ptype;
11803         uint32_t buff_size;
11804         uint8_t proto_id;
11805         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11806         uint32_t i, j, n;
11807         bool in_tunnel;
11808         int ret;
11809
11810         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11811             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11812                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11813                 return -1;
11814         }
11815
11816         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
11817                 rte_pmd_i40e_ptype_mapping_reset(port_id);
11818                 return 0;
11819         }
11820
11821         /* get information about new ptype num */
11822         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11823                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11824                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11825         if (ret) {
11826                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11827                 return ret;
11828         }
11829         if (!ptype_num) {
11830                 PMD_DRV_LOG(INFO, "No new ptype added");
11831                 return -1;
11832         }
11833
11834         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11835         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11836         if (!ptype) {
11837                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11838                 return -1;
11839         }
11840
11841         /* get information about new ptype list */
11842         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11843                                         (uint8_t *)ptype, buff_size,
11844                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11845         if (ret) {
11846                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11847                 rte_free(ptype);
11848                 return ret;
11849         }
11850
11851         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11852         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11853         if (!ptype_mapping) {
11854                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11855                 rte_free(ptype);
11856                 return -1;
11857         }
11858
11859         /* Update ptype mapping table. */
11860         for (i = 0; i < ptype_num; i++) {
11861                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11862                 ptype_mapping[i].sw_ptype = 0;
11863                 in_tunnel = false;
11864                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11865                         proto_id = ptype[i].protocols[j];
11866                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11867                                 continue;
11868                         for (n = 0; n < proto_num; n++) {
11869                                 if (proto[n].proto_id != proto_id)
11870                                         continue;
11871                                 memset(name, 0, sizeof(name));
11872                                 strlcpy(name, proto[n].name, sizeof(name));
11873                                 if (!strncasecmp(name, "PPPOE", 5))
11874                                         ptype_mapping[i].sw_ptype |=
11875                                                 RTE_PTYPE_L2_ETHER_PPPOE;
11876                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11877                                          !in_tunnel) {
11878                                         ptype_mapping[i].sw_ptype |=
11879                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11880                                         ptype_mapping[i].sw_ptype |=
11881                                                 RTE_PTYPE_L4_FRAG;
11882                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11883                                            in_tunnel) {
11884                                         ptype_mapping[i].sw_ptype |=
11885                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11886                                         ptype_mapping[i].sw_ptype |=
11887                                                 RTE_PTYPE_INNER_L4_FRAG;
11888                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
11889                                         ptype_mapping[i].sw_ptype |=
11890                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11891                                         in_tunnel = true;
11892                                 } else if (!strncasecmp(name, "IPV4", 4) &&
11893                                            !in_tunnel)
11894                                         ptype_mapping[i].sw_ptype |=
11895                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11896                                 else if (!strncasecmp(name, "IPV4", 4) &&
11897                                          in_tunnel)
11898                                         ptype_mapping[i].sw_ptype |=
11899                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11900                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11901                                          !in_tunnel) {
11902                                         ptype_mapping[i].sw_ptype |=
11903                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11904                                         ptype_mapping[i].sw_ptype |=
11905                                                 RTE_PTYPE_L4_FRAG;
11906                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11907                                            in_tunnel) {
11908                                         ptype_mapping[i].sw_ptype |=
11909                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11910                                         ptype_mapping[i].sw_ptype |=
11911                                                 RTE_PTYPE_INNER_L4_FRAG;
11912                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
11913                                         ptype_mapping[i].sw_ptype |=
11914                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11915                                         in_tunnel = true;
11916                                 } else if (!strncasecmp(name, "IPV6", 4) &&
11917                                            !in_tunnel)
11918                                         ptype_mapping[i].sw_ptype |=
11919                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11920                                 else if (!strncasecmp(name, "IPV6", 4) &&
11921                                          in_tunnel)
11922                                         ptype_mapping[i].sw_ptype |=
11923                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11924                                 else if (!strncasecmp(name, "UDP", 3) &&
11925                                          !in_tunnel)
11926                                         ptype_mapping[i].sw_ptype |=
11927                                                 RTE_PTYPE_L4_UDP;
11928                                 else if (!strncasecmp(name, "UDP", 3) &&
11929                                          in_tunnel)
11930                                         ptype_mapping[i].sw_ptype |=
11931                                                 RTE_PTYPE_INNER_L4_UDP;
11932                                 else if (!strncasecmp(name, "TCP", 3) &&
11933                                          !in_tunnel)
11934                                         ptype_mapping[i].sw_ptype |=
11935                                                 RTE_PTYPE_L4_TCP;
11936                                 else if (!strncasecmp(name, "TCP", 3) &&
11937                                          in_tunnel)
11938                                         ptype_mapping[i].sw_ptype |=
11939                                                 RTE_PTYPE_INNER_L4_TCP;
11940                                 else if (!strncasecmp(name, "SCTP", 4) &&
11941                                          !in_tunnel)
11942                                         ptype_mapping[i].sw_ptype |=
11943                                                 RTE_PTYPE_L4_SCTP;
11944                                 else if (!strncasecmp(name, "SCTP", 4) &&
11945                                          in_tunnel)
11946                                         ptype_mapping[i].sw_ptype |=
11947                                                 RTE_PTYPE_INNER_L4_SCTP;
11948                                 else if ((!strncasecmp(name, "ICMP", 4) ||
11949                                           !strncasecmp(name, "ICMPV6", 6)) &&
11950                                          !in_tunnel)
11951                                         ptype_mapping[i].sw_ptype |=
11952                                                 RTE_PTYPE_L4_ICMP;
11953                                 else if ((!strncasecmp(name, "ICMP", 4) ||
11954                                           !strncasecmp(name, "ICMPV6", 6)) &&
11955                                          in_tunnel)
11956                                         ptype_mapping[i].sw_ptype |=
11957                                                 RTE_PTYPE_INNER_L4_ICMP;
11958                                 else if (!strncasecmp(name, "GTPC", 4)) {
11959                                         ptype_mapping[i].sw_ptype |=
11960                                                 RTE_PTYPE_TUNNEL_GTPC;
11961                                         in_tunnel = true;
11962                                 } else if (!strncasecmp(name, "GTPU", 4)) {
11963                                         ptype_mapping[i].sw_ptype |=
11964                                                 RTE_PTYPE_TUNNEL_GTPU;
11965                                         in_tunnel = true;
11966                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
11967                                         ptype_mapping[i].sw_ptype |=
11968                                                 RTE_PTYPE_TUNNEL_GRENAT;
11969                                         in_tunnel = true;
11970                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
11971                                         ptype_mapping[i].sw_ptype |=
11972                                                 RTE_PTYPE_TUNNEL_L2TP;
11973                                         in_tunnel = true;
11974                                 }
11975
11976                                 break;
11977                         }
11978                 }
11979         }
11980
11981         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
11982                                                 ptype_num, 0);
11983         if (ret)
11984                 PMD_DRV_LOG(ERR, "Failed to update mapping table.");
11985
11986         rte_free(ptype_mapping);
11987         rte_free(ptype);
11988         return ret;
11989 }
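/* Worked example (illustrative only, hypothetical helper): for a
 * hypothetical DDP ptype whose protocol list reads OIPV4, GTPU, IPV4,
 * UDP, the strncasecmp chain above folds the names into one software
 * ptype as sketched below. The constants are the regular rte_mbuf
 * packet type flags; "OIPV4" both marks the outer IPv4 header and
 * switches the loop into tunnel mode.
 */
static uint32_t
i40e_example_gtpu_sw_ptype(void)
{
        uint32_t sw_ptype = 0;

        sw_ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;       /* "OIPV4" */
        sw_ptype |= RTE_PTYPE_TUNNEL_GTPU;               /* "GTPU" */
        sw_ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; /* "IPV4", in tunnel */
        sw_ptype |= RTE_PTYPE_INNER_L4_UDP;              /* "UDP", in tunnel */

        return sw_ptype;
}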
11990
11991 void
11992 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
11993                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
11994 {
11995         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11996         uint32_t proto_num;
11997         struct rte_pmd_i40e_proto_info *proto;
11998         uint32_t buff_size;
11999         uint32_t i;
12000         int ret;
12001
12002         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12003             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12004                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12005                 return;
12006         }
12007
12008         /* get information about protocol number */
12009         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12010                                        (uint8_t *)&proto_num, sizeof(proto_num),
12011                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12012         if (ret) {
12013                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12014                 return;
12015         }
12016         if (!proto_num) {
12017                 PMD_DRV_LOG(INFO, "No new protocol added");
12018                 return;
12019         }
12020
12021         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12022         proto = rte_zmalloc("new_proto", buff_size, 0);
12023         if (!proto) {
12024                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12025                 return;
12026         }
12027
12028         /* get information about protocol list */
12029         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12030                                         (uint8_t *)proto, buff_size,
12031                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12032         if (ret) {
12033                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12034                 rte_free(proto);
12035                 return;
12036         }
12037
12038         /* Check if GTP is supported. */
12039         for (i = 0; i < proto_num; i++) {
12040                 if (!strncmp(proto[i].name, "GTP", 3)) {
12041                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12042                                 pf->gtp_support = true;
12043                         else
12044                                 pf->gtp_support = false;
12045                         break;
12046                 }
12047         }
12048
12049         /* Update customized pctype info */
12050         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12051                                             proto_num, proto, op);
12052         if (ret)
12053                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12054
12055         /* Update customized ptype info */
12056         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12057                                            proto_num, proto, op);
12058         if (ret)
12059                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12060
12061         rte_free(proto);
12062 }
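/* Illustrative sketch (not part of the driver, hypothetical helper):
 * the customized pctype/ptype update above is driven by DDP package
 * processing, which an application reaches through the rte_pmd_i40e
 * API. The buffer is assumed to hold a complete package image already
 * read from disk.
 */
static int
i40e_example_load_ddp(uint16_t port_id, uint8_t *pkg_buf, uint32_t pkg_size)
{
        /* Writes the profile to the NIC and, on success, ends up in
         * i40e_update_customized_info() with RTE_PMD_I40E_PKG_OP_WR_ADD.
         */
        return rte_pmd_i40e_process_ddp_package(port_id, pkg_buf, pkg_size,
                                                RTE_PMD_I40E_PKG_OP_WR_ADD);
}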
12063
12064 /* Create a QinQ cloud filter
12065  *
12066  * The Fortville NIC has limited resources for tunnel filters,
12067  * so we can only reuse existing filters.
12068  *
12069  * In step 1 we define which Field Vector fields can be used for
12070  * filter types.
12071  * As we do not have the inner tag defined as a field,
12072  * we have to define it first by reusing one of the L1 entries.
12073  *
12074  * In step 2 we are replacing one of existing filter types with
12075  * a new one for QinQ.
12076  * As we are reusing L1 and replacing L2, some of the default filter
12077  * types will disappear, depending on which L1 and L2 entries we reuse.
12078  *
12079  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12080  *
12081  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12082  *              later when we define the cloud filter.
12083  *      a.      Valid_flags.replace_cloud = 0
12084  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
12085  *      c.      New_filter = 0x10
12086  *      d.      TR bit = 0xff (optional, not used here)
12087  *      e.      Buffer – 2 entries:
12088  *              i.      Byte 0 = 8 (outer vlan FV index).
12089  *                      Byte 1 = 0 (rsv)
12090  *                      Byte 2-3 = 0x0fff
12091  *              ii.     Byte 0 = 37 (inner vlan FV index).
12092  *                      Byte 1 = 0 (rsv)
12093  *                      Byte 2-3 = 0x0fff
12094  *
12095  * Step 2:
12096  * 2.   Create cloud filter using two L1 filters entries: stag and
12097  *              new filter (outer vlan + inner vlan)
12098  *      a.      Valid_flags.replace_cloud = 1
12099  *      b.      Old_filter = 1 (instead of outer IP)
12100  *      c.      New_filter = 0x10
12101  *      d.      Buffer – 2 entries:
12102  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
12103  *                      Byte 1-3 = 0 (rsv)
12104  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12105  *                      Byte 9-11 = 0 (rsv)
12106  */
12107 static int
12108 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12109 {
12110         int ret = -ENOTSUP;
12111         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12112         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12113         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12114
12115         if (pf->support_multi_driver) {
12116                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12117                 return ret;
12118         }
12119
12120         /* Init */
12121         memset(&filter_replace, 0,
12122                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12123         memset(&filter_replace_buf, 0,
12124                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12125
12126         /* create L1 filter */
12127         filter_replace.old_filter_type =
12128                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12129         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12130         filter_replace.tr_bit = 0;
12131
12132         /* Prepare the buffer, 2 entries */
12133         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12134         filter_replace_buf.data[0] |=
12135                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12136         /* Field Vector 12b mask */
12137         filter_replace_buf.data[2] = 0xff;
12138         filter_replace_buf.data[3] = 0x0f;
12139         filter_replace_buf.data[4] =
12140                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12141         filter_replace_buf.data[4] |=
12142                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12143         /* Field Vector 12b mask */
12144         filter_replace_buf.data[6] = 0xff;
12145         filter_replace_buf.data[7] = 0x0f;
12146         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12147                         &filter_replace_buf);
12148         if (ret != I40E_SUCCESS)
12149                 return ret;
12150         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
12151                     "cloud l1 type is changed from 0x%x to 0x%x",
12152                     filter_replace.old_filter_type,
12153                     filter_replace.new_filter_type);
12154
12155         /* Apply the second L2 cloud filter */
12156         memset(&filter_replace, 0,
12157                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12158         memset(&filter_replace_buf, 0,
12159                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12160
12161         /* create L2 filter, input for L2 filter will be L1 filter  */
12162         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12163         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12164         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12165
12166         /* Prepare the buffer, 2 entries */
12167         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12168         filter_replace_buf.data[0] |=
12169                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12170         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12171         filter_replace_buf.data[4] |=
12172                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12173         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12174                         &filter_replace_buf);
12175         if (!ret) {
12176                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
12177                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
12178                             "cloud filter type is changed from 0x%x to 0x%x",
12179                             filter_replace.old_filter_type,
12180                             filter_replace.new_filter_type);
12181         }
12182         return ret;
12183 }
12184
12185 int
12186 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
12187                    const struct rte_flow_action_rss *in)
12188 {
12189         if (in->key_len > RTE_DIM(out->key) ||
12190             in->queue_num > RTE_DIM(out->queue))
12191                 return -EINVAL;
12192         out->conf = (struct rte_flow_action_rss){
12193                 .types = in->types,
12194                 .key_len = in->key_len,
12195                 .queue_num = in->queue_num,
12196                 .key = in->key_len ?
12197                         memcpy(out->key, in->key, in->key_len) : NULL,
12197                 .queue = memcpy(out->queue, in->queue,
12198                                 sizeof(*in->queue) * in->queue_num),
12199         };
12200         return 0;
12201 }
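/* Illustrative sketch (not part of the driver, hypothetical helper):
 * duplicating a flattened rte_flow_action_rss into the PF-private copy
 * with i40e_rss_conf_init(). After the call, out->conf.key and
 * out->conf.queue point at the arrays embedded in "out", so the
 * caller's storage may go away. The key bytes and queue ids here are
 * arbitrary; fields not listed stay zeroed.
 */
static int
i40e_example_dup_rss_conf(struct i40e_rte_flow_rss_conf *out)
{
        static const uint8_t key[40] = { 0x6d, 0x5a };
        static const uint16_t queue[4] = { 0, 1, 2, 3 };
        const struct rte_flow_action_rss in = {
                .types = ETH_RSS_NONFRAG_IPV4_TCP,
                .key_len = sizeof(key),
                .queue_num = RTE_DIM(queue),
                .key = key,
                .queue = queue,
        };

        return i40e_rss_conf_init(out, &in);
}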
12202
12203 int
12204 i40e_action_rss_same(const struct rte_flow_action_rss *comp,
12205                      const struct rte_flow_action_rss *with)
12206 {
12207         return (comp->types == with->types &&
12208                 comp->key_len == with->key_len &&
12209                 comp->queue_num == with->queue_num &&
12210                 !memcmp(comp->key, with->key, with->key_len) &&
12211                 !memcmp(comp->queue, with->queue,
12212                         sizeof(*with->queue) * with->queue_num));
12213 }
12214
12215 int
12216 i40e_config_rss_filter(struct i40e_pf *pf,
12217                 struct i40e_rte_flow_rss_conf *conf, bool add)
12218 {
12219         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12220         uint32_t i, lut = 0;
12221         uint16_t j, num;
12222         struct rte_eth_rss_conf rss_conf = {
12223                 .rss_key = conf->conf.key_len ?
12224                         (void *)(uintptr_t)conf->conf.key : NULL,
12225                 .rss_key_len = conf->conf.key_len,
12226                 .rss_hf = conf->conf.types,
12227         };
12228         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12229
12230         if (!add) {
12231                 if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
12232                         i40e_pf_disable_rss(pf);
12233                         memset(rss_info, 0,
12234                                 sizeof(struct i40e_rte_flow_rss_conf));
12235                         return 0;
12236                 }
12237                 return -EINVAL;
12238         }
12239
12240         if (rss_info->conf.queue_num)
12241                 return -EINVAL;
12242
12243         /* If both VMDQ and RSS are enabled, not all PF queues are configured.
12244          * It's necessary to calculate the actual number of configured PF queues.
12245          */
12246         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12247                 num = i40e_pf_calc_configured_queues_num(pf);
12248         else
12249                 num = pf->dev_data->nb_rx_queues;
12250
12251         num = RTE_MIN(num, conf->conf.queue_num);
12252         PMD_DRV_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
12253                         num);
12254
12255         if (num == 0) {
12256                 PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
12257                 return -ENOTSUP;
12258         }
12259
12260         /* Fill in redirection table */
12261         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12262                 if (j == num)
12263                         j = 0;
12264                 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
12265                         hw->func_caps.rss_table_entry_width) - 1));
12266                 if ((i & 3) == 3)
12267                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12268         }
12269
12270         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
12271                 i40e_pf_disable_rss(pf);
12272                 return 0;
12273         }
12274         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
12275                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12276                 /* Random default keys */
12277                 static uint32_t rss_key_default[] = {0x6b793944,
12278                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12279                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12280                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12281
12282                 rss_conf.rss_key = (uint8_t *)rss_key_default;
12283                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12284                                                         sizeof(uint32_t);
12285         }
12286
12287         i40e_hw_rss_hash_set(pf, &rss_conf);
12288
12289         if (i40e_rss_conf_init(rss_info, &conf->conf))
12290                 return -EINVAL;
12291
12292         return 0;
12293 }
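/* Illustrative sketch (not part of the driver, hypothetical helper):
 * the redirection-table fill above packs four 8-bit queue indices into
 * each 32-bit I40E_PFQF_HLUT register, with the most recently shifted
 * index in the low byte. The helper below mirrors that arithmetic on a
 * plain array; the 0xFF mask assumes the usual 8-bit table entry width.
 */
static void
i40e_example_fill_lut(uint32_t *lut_regs, uint32_t table_size,
                      const uint16_t *queue, uint16_t num)
{
        uint32_t i, lut = 0;
        uint16_t j;

        for (i = 0, j = 0; i < table_size; i++, j++) {
                if (j == num)
                        j = 0;
                lut = (lut << 8) | (queue[j] & 0xFF);
                if ((i & 3) == 3) /* one register per 4 table entries */
                        lut_regs[i >> 2] = lut;
        }
}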
12294
12295 RTE_INIT(i40e_init_log);
12296 static void
12297 i40e_init_log(void)
12298 {
12299         i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
12300         if (i40e_logtype_init >= 0)
12301                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
12302         i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
12303         if (i40e_logtype_driver >= 0)
12304                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
12305 }
12306
12307 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12308                               QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16 "
12309                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1");