dpdk.git · drivers/net/i40e/i40e_ethdev.c @ afa9a1c133f8172131f190d4f67e7a6132f44839
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"
#include "rte_pmd_i40e.h"

#define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
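
/*
 * Usage sketch (assumption: standard DPDK devargs syntax; the PCI address
 * is illustrative): both options are passed as device arguments on the EAL
 * command line, e.g.
 *     -w 84:00.0,enable_floating_veb=1,floating_veb_list=1;3-4
 * Shell quoting may be needed because of the ';' range separator.
 */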

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
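
/*
 * Worked example: 0xF2000 bytes == 968 * 1024 bytes == I40E_RXPBSIZE, and
 * ">> I40E_KILOSHIFT" converts bytes to kilobytes, so both default water
 * marks come out as 0x3C8 (968) KB, i.e. the whole Rx packet buffer.
 */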

/* Receive average packet size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
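
/*
 * Illustrative helper (not part of the original file): a flow type is
 * supported by this PMD when its bit is set in I40E_FLOW_TYPES. All bits
 * used by the mask sit below bit 32.
 */
static inline bool
i40e_flow_type_supported_sketch(uint16_t flow_type)
{
	return flow_type < 32 && (I40E_FLOW_TYPES & (1UL << flow_type)) != 0;
}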

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* All 8 words of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
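
/*
 * Sanity note: the WORDS mask is exactly WORD1..WORD8 OR'ed together
 * (0x2000 | 0x1000 | 0x800 | 0x400 | 0x200 | 0x100 | 0x80 | 0x40 == 0x3FC0).
 */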

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPV4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
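
/*
 * A minimal sketch, assuming the rte_pci_read_config()/rte_pci_write_config()
 * API from rte_bus_pci.h, of how the offsets and masks above are intended to
 * be used to enable PCIe Extended Tag. The helper name is illustrative only;
 * the driver's real logic is equivalent.
 */
static inline void
i40e_enable_extended_tag_sketch(struct rte_pci_device *pci_dev)
{
	uint32_t buf = 0;

	/* Check the Extended Tag capability bit first */
	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CAP_REG) < 0 ||
	    !(buf & PCI_DEV_CAP_EXT_TAG_MASK))
		return;

	/* Then set the enable bit in the device control register */
	if (rte_pci_read_config(pci_dev, &buf, sizeof(buf),
				PCI_DEV_CTRL_REG) < 0)
		return;
	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
	rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
}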

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static int  i40e_dev_reset(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static int i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static int i40e_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static int i40e_macaddr_add(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index,
			    uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
						     uint16_t seid,
						     uint16_t rule_type,
						     uint16_t *entries,
						     uint16_t count,
						     uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static int i40e_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo);
static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
				  struct rte_dev_eeprom_info *info);

static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int i40e_ethertype_filter_convert(
	const struct rte_eth_ethertype_filter *input,
	struct i40e_ethertype_filter *filter);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter);

static int i40e_tunnel_filter_convert(
	struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
	struct i40e_tunnel_filter *tunnel_filter);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
				struct i40e_tunnel_filter *tunnel_filter);
static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);

static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);

int i40e_logtype_init;
int i40e_logtype_driver;

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.dev_reset                    = i40e_dev_reset,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.fw_version_get               = i40e_fw_version_get,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.get_module_info              = i40e_get_module_info,
	.get_module_eeprom            = i40e_get_module_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
	.tm_ops_get                   = i40e_tm_ops_get,
};

/* store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))
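
/*
 * Illustrative helper (not part of the original file), mirroring how the
 * xstats callbacks consume the name/offset tables above: each value is
 * read from the stats structure at the recorded byte offset.
 */
static inline uint64_t
i40e_xstat_value_sketch(const struct i40e_hw_port_stats *stats,
			const struct rte_i40e_xstats_name_off *entry)
{
	return *(const uint64_t *)((const char *)stats + entry->offset);
}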

static int
eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	int i, retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	}

	/* physical port net_bdf_port */
	snprintf(name, sizeof(name), "net_%s", pci_dev->device.name);

	retval = rte_eth_dev_create(&pci_dev->device, name,
		sizeof(struct i40e_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_i40e_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* probe VF representor ports */
	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(name);

	if (pf_ethdev == NULL)
		return -ENODEV;

	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct i40e_vf_representor representor = {
			.vf_id = eth_da.representor_ports[i],
			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
				pf_ethdev->data->dev_private)->switch_domain_id,
			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
				pf_ethdev->data->dev_private)
		};

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name, eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct i40e_vf_representor), NULL, NULL,
			i40e_vf_representor_init, &representor);

		if (retval)
			PMD_DRV_LOG(ERR, "failed to create i40e vf "
				"representor %s.", name);
	}

	return 0;
}

static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
	else
		return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
}

static struct rte_pci_driver rte_i40e_pmd = {
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_i40e_pci_probe,
	.remove = eth_i40e_pci_remove,
};

static inline void
i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
	i40e_write_rx_ctl(hw, reg_addr, reg_val);
	PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
		    "with value 0x%08x",
		    reg_addr, reg_val);
}

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_L3_MAP
#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for parsing the packet type of QinQ.
	 * This should be removed once a proper configuration API is
	 * added, to avoid configuration conflicts between ports of
	 * the same device.
	 */
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
	i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}

static inline void i40e_config_automask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t val;

	/* INTENA flag is not auto-cleared for interrupt */
	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;

	/* If multi-driver support is enabled, the PF will use INT0. */
	if (!pf->support_multi_driver)
		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;

	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR,
			"Failed to add filter to drop flow control frames from VSIs.");
}
static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
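
/*
 * Values accepted by the parser above: a single VF index ("3"), a
 * ';'-separated list ("1;4"), or ranges ("0-2;6"); range ends past
 * I40E_MAX_VF are clamped to I40E_MAX_VF - 1.
 */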

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * attach to the legacy VEB first, then the VFs selected by
	 * floating_veb_list are moved to the floating VEB.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}
static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when there's key-value:
	 * enable_floating_veb=1
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	char ethertype_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters ethertype_hash_params = {
		.name = ethertype_hash_name,
		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
		.key_len = sizeof(struct i40e_ethertype_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize ethertype filter rule list and hash */
	TAILQ_INIT(&ethertype_rule->ethertype_list);
	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
		 "ethertype_%s", dev->device->name);
	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
	if (!ethertype_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
		return -EINVAL;
	}
	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
				       sizeof(struct i40e_ethertype_filter *) *
				       I40E_MAX_ETHERTYPE_FILTER_NUM,
				       0);
	if (!ethertype_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for ethertype hash map!");
		ret = -ENOMEM;
		goto err_ethertype_hash_map_alloc;
	}

	return 0;

err_ethertype_hash_map_alloc:
	rte_hash_free(ethertype_rule->hash_table);

	return ret;
}
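
/*
 * A small sketch, under the assumption of the standard rte_hash API, of the
 * insert pattern shared by the three filter tables set up in this file:
 * rte_hash_add_key() returns a slot index that is reused to index the
 * hash_map array. The helper name is illustrative only.
 */
static inline int
i40e_sw_filter_insert_sketch(struct rte_hash *h, void **hash_map,
			     const void *key, void *filter)
{
	int pos = rte_hash_add_key(h, key);

	if (pos < 0)
		return pos;	/* negative errno from rte_hash */
	hash_map[pos] = filter;
	return 0;
}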

static int
i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	char tunnel_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters tunnel_hash_params = {
		.name = tunnel_hash_name,
		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
		.key_len = sizeof(struct i40e_tunnel_filter_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize tunnel filter rule list and hash */
	TAILQ_INIT(&tunnel_rule->tunnel_list);
	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
		 "tunnel_%s", dev->device->name);
	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
	if (!tunnel_rule->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
		return -EINVAL;
	}
	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
				    sizeof(struct i40e_tunnel_filter *) *
				    I40E_MAX_TUNNEL_FILTER_NUM,
				    0);
	if (!tunnel_rule->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for tunnel hash map!");
		ret = -ENOMEM;
		goto err_tunnel_hash_map_alloc;
	}

	return 0;

err_tunnel_hash_map_alloc:
	rte_hash_free(tunnel_rule->hash_table);

	return ret;
}

static int
i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = I40E_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct i40e_fdir_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize flow director filter rule list and hash */
	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
					  sizeof(struct i40e_fdir_filter *) *
					  I40E_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
i40e_init_customized_info(struct i40e_pf *pf)
{
	int i;

	/* Initialize customized pctype */
	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
		pf->customized_pctype[i].index = i;
		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
		pf->customized_pctype[i].valid = false;
	}

	pf->gtp_support = false;
}

void
i40e_init_queue_region_conf(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	uint16_t i;

	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);

	memset(info, 0, sizeof(struct i40e_queue_regions));
}

#define ETH_I40E_SUPPORT_MULTI_DRIVER   "support-multi-driver"

static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
			       const char *value,
			       void *opaque)
{
	struct i40e_pf *pf;
	unsigned long support_multi_driver;
	char *end;

	pf = (struct i40e_pf *)opaque;

	errno = 0;
	support_multi_driver = strtoul(value, &end, 10);
	if (errno != 0 || end == value || *end != 0) {
		PMD_DRV_LOG(WARNING, "Wrong global configuration");
		return -(EINVAL);
	}

	if (support_multi_driver == 1 || support_multi_driver == 0)
		pf->support_multi_driver = (bool)support_multi_driver;
	else
		PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
			    "enable global configuration by default.",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);
	return 0;
}

static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	static const char *const valid_keys[] = {
		ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
	struct rte_kvargs *kvlist;

	/* Enable global configuration by default */
	pf->support_multi_driver = false;

	if (!dev->device->devargs)
		return 0;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;

	if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" given; "
			    "only the first invalid or last valid one is used!",
			    ETH_I40E_SUPPORT_MULTI_DRIVER);

	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
			       i40e_parse_multi_drv_handler, pf) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}

	rte_kvargs_free(kvlist);
	return 0;
}
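
/*
 * Usage note (a sketch, assuming standard DPDK devargs syntax): the option
 * is passed per device, e.g. -w 84:00.0,support-multi-driver=1, when the
 * NIC is shared with other drivers. With it set, this PMD skips programming
 * global registers such as the ones touched in i40e_GLQF_reg_init().
 */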

static int
eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;
	dev->tx_pkt_prepare = i40e_prep_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check whether a different
	 * RX/TX function is needed.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	i40e_set_default_ptype_table(dev);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	intr_handle = &pci_dev->intr_handle;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR,
			"Hardware is not available, as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Check whether multi-driver support is required */
	i40e_support_multi_driver(dev);
1216         /* Make sure all is clean before doing PF reset */
1217         i40e_clear_hw(hw);
1218
1219         /* Initialize the hardware */
1220         i40e_hw_init(dev);
1221
1222         /* Reset here to make sure all is clean for each PF */
1223         ret = i40e_pf_reset(hw);
1224         if (ret) {
1225                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1226                 return ret;
1227         }
1228
1229         /* Initialize the shared code (base driver) */
1230         ret = i40e_init_shared_code(hw);
1231         if (ret) {
1232                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1233                 return ret;
1234         }
1235
1236         i40e_config_automask(pf);
1237
1238         i40e_set_default_pctype_table(dev);
1239
1240         /*
1241          * To work around the NVM issue, initialize registers
1242          * for packet type of QinQ by software.
1243          * It should be removed once issues are fixed in NVM.
1244          */
1245         if (!pf->support_multi_driver)
1246                 i40e_GLQF_reg_init(hw);
1247
1248         /* Initialize the input set for filters (hash and fd) to default value */
1249         i40e_filter_input_set_init(pf);
1250
1251         /* Initialize the parameters for adminq */
1252         i40e_init_adminq_parameter(hw);
1253         ret = i40e_init_adminq(hw);
1254         if (ret != I40E_SUCCESS) {
1255                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1256                 return -EIO;
1257         }
1258         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1259                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1260                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1261                      ((hw->nvm.version >> 12) & 0xf),
1262                      ((hw->nvm.version >> 4) & 0xff),
1263                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
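        /*
         * Example of the NVM version decode in the log above: if
         * hw->nvm.version == 0x6801, bits 15:12 give major 0x6, bits 11:4
         * give minor 0x80 (128) and bits 3:0 give patch 0x1, so the line
         * prints "NVM 06.128.01" (illustrative value only).
         */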
1264
1265         /* initialise the L3_MAP register */
1266         if (!pf->support_multi_driver) {
1267                 ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1268                                                    0x00000028,  NULL);
1269                 if (ret)
1270                         PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1271                                      ret);
1272                 PMD_INIT_LOG(DEBUG,
1273                              "Global register 0x%08x is changed with 0x28",
1274                              I40E_GLQF_L3_MAP(40));
1275                 i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
1276         }
1277
1278         /* Need the special FW version to support floating VEB */
1279         config_floating_veb(dev);
1280         /* Clear PXE mode */
1281         i40e_clear_pxe_mode(hw);
1282         i40e_dev_sync_phy_type(hw);
1283
1284         /*
        /*
         * On X710, the performance numbers on recent firmware versions fall
         * far short of expectations, and the fix may not be integrated in
         * the next firmware release, so a software workaround in the driver
         * is needed. It modifies the initial values of three internal-only
         * registers. The workaround can be removed once the issue is fixed
         * in firmware.
         */
1292         i40e_configure_registers(hw);
1293
1294         /* Get hw capabilities */
1295         ret = i40e_get_cap(hw);
1296         if (ret != I40E_SUCCESS) {
1297                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1298                 goto err_get_capabilities;
1299         }
1300
1301         /* Initialize parameters for PF */
1302         ret = i40e_pf_parameter_init(dev);
1303         if (ret != 0) {
1304                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1305                 goto err_parameter_init;
1306         }
1307
1308         /* Initialize the queue management */
1309         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1310         if (ret < 0) {
1311                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1312                 goto err_qp_pool_init;
1313         }
1314         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1315                                 hw->func_caps.num_msix_vectors - 1);
1316         if (ret < 0) {
1317                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1318                 goto err_msix_pool_init;
1319         }
1320
1321         /* Initialize lan hmc */
1322         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1323                                 hw->func_caps.num_rx_qp, 0, 0);
1324         if (ret != I40E_SUCCESS) {
1325                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1326                 goto err_init_lan_hmc;
1327         }
1328
1329         /* Configure lan hmc */
1330         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1331         if (ret != I40E_SUCCESS) {
1332                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1333                 goto err_configure_lan_hmc;
1334         }
1335
1336         /* Get and check the mac address */
1337         i40e_get_mac_addr(hw, hw->mac.addr);
1338         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1339                 PMD_INIT_LOG(ERR, "mac address is not valid");
1340                 ret = -EIO;
1341                 goto err_get_mac_addr;
1342         }
1343         /* Copy the permanent MAC address */
1344         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1345                         (struct ether_addr *) hw->mac.perm_addr);
1346
1347         /* Disable flow control */
1348         hw->fc.requested_mode = I40E_FC_NONE;
1349         i40e_set_fc(hw, &aq_fail, TRUE);
1350
1351         /* Set the global registers with default ether type value */
1352         if (!pf->support_multi_driver) {
1353                 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1354                                          ETHER_TYPE_VLAN);
1355                 if (ret != I40E_SUCCESS) {
1356                         PMD_INIT_LOG(ERR,
1357                                      "Failed to set the default outer "
1358                                      "VLAN ether type");
1359                         goto err_setup_pf_switch;
1360                 }
1361         }
1362
1363         /* PF setup, which includes VSI setup */
1364         ret = i40e_pf_setup(pf);
1365         if (ret) {
1366                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1367                 goto err_setup_pf_switch;
1368         }
1369
1370         /* reset all stats of the device, including pf and main vsi */
1371         i40e_dev_stats_reset(dev);
1372
1373         vsi = pf->main_vsi;
1374
1375         /* Disable double vlan by default */
1376         i40e_vsi_config_double_vlan(vsi, FALSE);
1377
1378         /* Disable S-TAG identification when floating_veb is disabled */
1379         if (!pf->floating_veb) {
1380                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1381                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1382                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1383                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1384                 }
1385         }
1386
1387         if (!vsi->max_macaddrs)
1388                 len = ETHER_ADDR_LEN;
1389         else
1390                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1391
1392         /* Should be after VSI initialized */
1393         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1394         if (!dev->data->mac_addrs) {
1395                 PMD_INIT_LOG(ERR,
                        "Failed to allocate memory for storing MAC address");
1397                 goto err_mac_alloc;
1398         }
1399         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1400                                         &dev->data->mac_addrs[0]);
1401
1402         /* Init dcb to sw mode by default */
1403         ret = i40e_dcb_init_configure(dev, TRUE);
1404         if (ret != I40E_SUCCESS) {
1405                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1406                 pf->flags &= ~I40E_FLAG_DCB;
1407         }
1408         /* Update HW struct after DCB configuration */
1409         i40e_get_cap(hw);
1410
1411         /* initialize pf host driver to setup SRIOV resource if applicable */
1412         i40e_pf_host_init(dev);
1413
1414         /* register callback func to eal lib */
1415         rte_intr_callback_register(intr_handle,
1416                                    i40e_dev_interrupt_handler, dev);
1417
1418         /* configure and enable device interrupt */
1419         i40e_pf_config_irq0(hw, TRUE);
1420         i40e_pf_enable_irq0(hw);
1421
1422         /* enable uio intr after callback register */
1423         rte_intr_enable(intr_handle);
1424
1425         /* By default disable flexible payload in global configuration */
1426         if (!pf->support_multi_driver)
1427                 i40e_flex_payload_reg_set_default(hw);
1428
1429         /*
1430          * Add an ethertype filter to drop all flow control frames transmitted
         * from VSIs. By doing so, we stop VFs from sending PAUSE or PFC
         * frames onto the wire.
1433          */
1434         i40e_add_tx_flow_control_drop_filter(pf);
1435
1436         /* Set the max frame size to 0x2600 by default,
1437          * in case other drivers changed the default value.
1438          */
1439         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1440
1441         /* initialize mirror rule list */
1442         TAILQ_INIT(&pf->mirror_list);
1443
1444         /* initialize Traffic Manager configuration */
1445         i40e_tm_conf_init(dev);
1446
1447         /* Initialize customized information */
1448         i40e_init_customized_info(pf);
1449
1450         ret = i40e_init_ethtype_filter_list(dev);
1451         if (ret < 0)
1452                 goto err_init_ethtype_filter_list;
1453         ret = i40e_init_tunnel_filter_list(dev);
1454         if (ret < 0)
1455                 goto err_init_tunnel_filter_list;
1456         ret = i40e_init_fdir_filter_list(dev);
1457         if (ret < 0)
1458                 goto err_init_fdir_filter_list;
1459
1460         /* initialize queue region configuration */
1461         i40e_init_queue_region_conf(dev);
1462
1463         /* initialize rss configuration from rte_flow */
1464         memset(&pf->rss_info, 0,
1465                 sizeof(struct i40e_rte_flow_rss_conf));
1466
1467         return 0;
1468
1469 err_init_fdir_filter_list:
1470         rte_free(pf->tunnel.hash_table);
1471         rte_free(pf->tunnel.hash_map);
1472 err_init_tunnel_filter_list:
1473         rte_free(pf->ethertype.hash_table);
1474         rte_free(pf->ethertype.hash_map);
1475 err_init_ethtype_filter_list:
1476         rte_free(dev->data->mac_addrs);
1477 err_mac_alloc:
1478         i40e_vsi_release(pf->main_vsi);
1479 err_setup_pf_switch:
1480 err_get_mac_addr:
1481 err_configure_lan_hmc:
1482         (void)i40e_shutdown_lan_hmc(hw);
1483 err_init_lan_hmc:
1484         i40e_res_pool_destroy(&pf->msix_pool);
1485 err_msix_pool_init:
1486         i40e_res_pool_destroy(&pf->qp_pool);
1487 err_qp_pool_init:
1488 err_parameter_init:
1489 err_get_capabilities:
1490         (void)i40e_shutdown_adminq(hw);
1491
1492         return ret;
1493 }
1494
1495 static void
1496 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1497 {
1498         struct i40e_ethertype_filter *p_ethertype;
1499         struct i40e_ethertype_rule *ethertype_rule;
1500
1501         ethertype_rule = &pf->ethertype;
1502         /* Remove all ethertype filter rules and hash */
1503         if (ethertype_rule->hash_map)
1504                 rte_free(ethertype_rule->hash_map);
1505         if (ethertype_rule->hash_table)
1506                 rte_hash_free(ethertype_rule->hash_table);
1507
1508         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1509                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1510                              p_ethertype, rules);
1511                 rte_free(p_ethertype);
1512         }
1513 }
1514
1515 static void
1516 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1517 {
1518         struct i40e_tunnel_filter *p_tunnel;
1519         struct i40e_tunnel_rule *tunnel_rule;
1520
1521         tunnel_rule = &pf->tunnel;
1522         /* Remove all tunnel director rules and hash */
1523         if (tunnel_rule->hash_map)
1524                 rte_free(tunnel_rule->hash_map);
1525         if (tunnel_rule->hash_table)
1526                 rte_hash_free(tunnel_rule->hash_table);
1527
1528         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1529                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1530                 rte_free(p_tunnel);
1531         }
1532 }
1533
1534 static void
1535 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1536 {
1537         struct i40e_fdir_filter *p_fdir;
1538         struct i40e_fdir_info *fdir_info;
1539
1540         fdir_info = &pf->fdir;
1541         /* Remove all flow director rules and hash */
1542         if (fdir_info->hash_map)
1543                 rte_free(fdir_info->hash_map);
1544         if (fdir_info->hash_table)
1545                 rte_hash_free(fdir_info->hash_table);
1546
1547         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1548                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1549                 rte_free(p_fdir);
1550         }
1551 }
1552
1553 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1554 {
1555         /*
1556          * Disable by default flexible payload
1557          * for corresponding L2/L3/L4 layers.
1558          */
1559         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1560         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1561         I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1562         i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
1563 }
1564
1565 static int
1566 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1567 {
1568         struct i40e_pf *pf;
1569         struct rte_pci_device *pci_dev;
1570         struct rte_intr_handle *intr_handle;
1571         struct i40e_hw *hw;
1572         struct i40e_filter_control_settings settings;
1573         struct rte_flow *p_flow;
1574         int ret;
1575         uint8_t aq_fail = 0;
1576         int retries = 0;
1577
1578         PMD_INIT_FUNC_TRACE();
1579
1580         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1581                 return 0;
1582
1583         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1584         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1585         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1586         intr_handle = &pci_dev->intr_handle;
1587
1588         ret = rte_eth_switch_domain_free(pf->switch_domain_id);
1589         if (ret)
1590                 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
1591
1592         if (hw->adapter_stopped == 0)
1593                 i40e_dev_close(dev);
1594
1595         dev->dev_ops = NULL;
1596         dev->rx_pkt_burst = NULL;
1597         dev->tx_pkt_burst = NULL;
1598
1599         /* Clear PXE mode */
1600         i40e_clear_pxe_mode(hw);
1601
1602         /* Unconfigure filter control */
1603         memset(&settings, 0, sizeof(settings));
1604         ret = i40e_set_filter_control(hw, &settings);
1605         if (ret)
1606                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1607                                         ret);
1608
1609         /* Disable flow control */
1610         hw->fc.requested_mode = I40E_FC_NONE;
1611         i40e_set_fc(hw, &aq_fail, TRUE);
1612
1613         /* uninitialize pf host driver */
1614         i40e_pf_host_uninit(dev);
1615
1616         rte_free(dev->data->mac_addrs);
1617         dev->data->mac_addrs = NULL;
1618
1619         /* disable uio intr before callback unregister */
1620         rte_intr_disable(intr_handle);
1621
1622         /* unregister callback func to eal lib */
1623         do {
1624                 ret = rte_intr_callback_unregister(intr_handle,
1625                                 i40e_dev_interrupt_handler, dev);
1626                 if (ret >= 0) {
1627                         break;
1628                 } else if (ret != -EAGAIN) {
1629                         PMD_INIT_LOG(ERR,
1630                                  "intr callback unregister failed: %d",
1631                                  ret);
1632                         return ret;
1633                 }
1634                 i40e_msec_delay(500);
1635         } while (retries++ < 5);
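        /*
         * The loop above makes up to six unregister attempts, sleeping
         * 500 ms after each failed one, i.e. it waits roughly 3 s at most
         * for in-flight interrupt callbacks to finish before giving up.
         */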
1636
1637         i40e_rm_ethtype_filter_list(pf);
1638         i40e_rm_tunnel_filter_list(pf);
1639         i40e_rm_fdir_filter_list(pf);
1640
1641         /* Remove all flows */
1642         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1643                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1644                 rte_free(p_flow);
1645         }
1646
1647         /* Remove all Traffic Manager configuration */
1648         i40e_tm_conf_uninit(dev);
1649
1650         return 0;
1651 }
1652
1653 static int
1654 i40e_dev_configure(struct rte_eth_dev *dev)
1655 {
1656         struct i40e_adapter *ad =
1657                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1658         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1659         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1660         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1661         int i, ret;
1662
1663         ret = i40e_dev_sync_phy_type(hw);
1664         if (ret)
1665                 return ret;
1666
        /* Initialize to TRUE. If any Rx queue fails to meet the bulk
         * allocation or vector Rx preconditions, it will be reset.
         */
1670         ad->rx_bulk_alloc_allowed = true;
1671         ad->rx_vec_allowed = true;
1672         ad->tx_simple_allowed = true;
1673         ad->tx_vec_allowed = true;
1674
1675         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1676                 ret = i40e_fdir_setup(pf);
1677                 if (ret != I40E_SUCCESS) {
1678                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1679                         return -ENOTSUP;
1680                 }
1681                 ret = i40e_fdir_configure(dev);
1682                 if (ret < 0) {
1683                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1684                         goto err;
1685                 }
1686         } else
1687                 i40e_fdir_teardown(pf);
1688
1689         ret = i40e_dev_init_vlan(dev);
1690         if (ret < 0)
1691                 goto err;
1692
        /* VMDQ setup.
         *  VMDQ setup has to live outside i40e_pf_config_mq_rx() because
         *  VMDQ and RSS have different requirements. The general PMD call
         *  sequence is NIC init, configure, rx/tx_queue_setup and dev_start.
         *  rx/tx_queue_setup() looks up the VSI that a queue belongs to when
         *  VMDQ is enabled, so VMDQ has to be configured before
         *  rx/tx_queue_setup(), which makes this function the right place
         *  for it. RSS, by contrast, needs the actual number of configured
         *  Rx queues, which is only known after rx_queue_setup(), so RSS
         *  setup belongs in dev_start(). See the call-sequence sketch after
         *  this block.
         */
1705         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1706                 ret = i40e_vmdq_setup(dev);
1707                 if (ret)
1708                         goto err;
1709         }
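
        /*
         * Sketch of the call sequence the comment above refers to, from the
         * application side (standard ethdev API; queue counts illustrative):
         *
         *   rte_eth_dev_configure(port, nb_rxq, nb_txq, &conf); // VMDQ VSIs
         *   rte_eth_rx_queue_setup(port, q, nb_desc, socket, &rxconf, mp);
         *   rte_eth_tx_queue_setup(port, q, nb_desc, socket, &txconf);
         *   rte_eth_dev_start(port);                            // RSS config
         */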
1710
1711         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1712                 ret = i40e_dcb_setup(dev);
1713                 if (ret) {
1714                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1715                         goto err_dcb;
1716                 }
1717         }
1718
1719         TAILQ_INIT(&pf->flow_list);
1720
1721         return 0;
1722
1723 err_dcb:
1724         /* need to release vmdq resource if exists */
1725         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1726                 i40e_vsi_release(pf->vmdq[i].vsi);
1727                 pf->vmdq[i].vsi = NULL;
1728         }
1729         rte_free(pf->vmdq);
1730         pf->vmdq = NULL;
1731 err:
1732         /* need to release fdir resource if exists */
1733         i40e_fdir_teardown(pf);
1734         return ret;
1735 }
1736
1737 void
1738 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1739 {
1740         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1741         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1742         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1743         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1744         uint16_t msix_vect = vsi->msix_intr;
1745         uint16_t i;
1746
1747         for (i = 0; i < vsi->nb_qps; i++) {
1748                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1749                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1750                 rte_wmb();
1751         }
1752
1753         if (vsi->type != I40E_VSI_SRIOV) {
1754                 if (!rte_intr_allow_others(intr_handle)) {
1755                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1756                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1757                         I40E_WRITE_REG(hw,
1758                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1759                                        0);
1760                 } else {
1761                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1762                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1763                         I40E_WRITE_REG(hw,
1764                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1765                                                        msix_vect - 1), 0);
1766                 }
1767         } else {
1768                 uint32_t reg;
1769                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1770                         vsi->user_param + (msix_vect - 1);
1771
1772                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1773                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1774         }
1775         I40E_WRITE_FLUSH(hw);
1776 }
1777
1778 static void
1779 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1780                        int base_queue, int nb_queue,
1781                        uint16_t itr_idx)
1782 {
1783         int i;
1784         uint32_t val;
1785         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1786         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1787
1788         /* Bind all RX queues to allocated MSIX interrupt */
1789         for (i = 0; i < nb_queue; i++) {
1790                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1791                         itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1792                         ((base_queue + i + 1) <<
1793                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1794                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1795                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1796
1797                 if (i == nb_queue - 1)
1798                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1799                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1800         }
1801
1802         /* Write first RX queue to Link list register as the head element */
1803         if (vsi->type != I40E_VSI_SRIOV) {
1804                 uint16_t interval =
1805                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
1806                                                pf->support_multi_driver);
1807
1808                 if (msix_vect == I40E_MISC_VEC_ID) {
1809                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1810                                        (base_queue <<
1811                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1812                                        (0x0 <<
1813                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1814                         I40E_WRITE_REG(hw,
1815                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1816                                        interval);
1817                 } else {
1818                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1819                                        (base_queue <<
1820                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1821                                        (0x0 <<
1822                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1823                         I40E_WRITE_REG(hw,
1824                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1825                                                        msix_vect - 1),
1826                                        interval);
1827                 }
1828         } else {
1829                 uint32_t reg;
1830
1831                 if (msix_vect == I40E_MISC_VEC_ID) {
1832                         I40E_WRITE_REG(hw,
1833                                        I40E_VPINT_LNKLST0(vsi->user_param),
1834                                        (base_queue <<
1835                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1836                                        (0x0 <<
1837                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1838                 } else {
                        /* num_msix_vectors_vf includes irq0, so subtract 1 */
1840                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1841                                 vsi->user_param + (msix_vect - 1);
1842
1843                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1844                                        (base_queue <<
1845                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1846                                        (0x0 <<
1847                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1848                 }
1849         }
1850
1851         I40E_WRITE_FLUSH(hw);
1852 }
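
/*
 * Worked example for the QINT_RQCTL value composed above (illustrative
 * numbers): msix_vect = 2, itr_idx = 0, base_queue = 8 and i = 0 with a
 * second queue following give
 *
 *   val = (2 << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
 *         (9 << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 *         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 *
 * i.e. "queue 8 fires vector 2; the next queue in the chain is queue 9".
 * The last queue gets I40E_QINT_RQCTL_NEXTQ_INDX_MASK as an end-of-list
 * marker instead. In the SRIOV branch the absolute VPINT_LNKLSTN index is
 * (num_msix_vectors_vf - 1) * vf_id + (msix_vect - 1), with vsi->user_param
 * holding the VF index; e.g. 5 vectors per VF, VF 3, vector 2 gives
 * register index 4 * 3 + 1 = 13.
 */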
1853
1854 void
1855 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1856 {
1857         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1858         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1859         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1860         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1861         uint16_t msix_vect = vsi->msix_intr;
1862         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1863         uint16_t queue_idx = 0;
1864         int record = 0;
1865         int i;
1866
1867         for (i = 0; i < vsi->nb_qps; i++) {
1868                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1869                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1870         }
1871
1872         /* VF bind interrupt */
1873         if (vsi->type == I40E_VSI_SRIOV) {
1874                 __vsi_queues_bind_intr(vsi, msix_vect,
1875                                        vsi->base_queue, vsi->nb_qps,
1876                                        itr_idx);
1877                 return;
1878         }
1879
1880         /* PF & VMDq bind interrupt */
1881         if (rte_intr_dp_is_en(intr_handle)) {
1882                 if (vsi->type == I40E_VSI_MAIN) {
1883                         queue_idx = 0;
1884                         record = 1;
1885                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1886                         struct i40e_vsi *main_vsi =
1887                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1888                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1889                         record = 1;
1890                 }
1891         }
1892
1893         for (i = 0; i < vsi->nb_used_qps; i++) {
1894                 if (nb_msix <= 1) {
1895                         if (!rte_intr_allow_others(intr_handle))
1896                                 /* allow to share MISC_VEC_ID */
1897                                 msix_vect = I40E_MISC_VEC_ID;
1898
                        /* not enough MSI-X vectors, map all queues to one */
1900                         __vsi_queues_bind_intr(vsi, msix_vect,
1901                                                vsi->base_queue + i,
1902                                                vsi->nb_used_qps - i,
1903                                                itr_idx);
1904                         for (; !!record && i < vsi->nb_used_qps; i++)
1905                                 intr_handle->intr_vec[queue_idx + i] =
1906                                         msix_vect;
1907                         break;
1908                 }
1909                 /* 1:1 queue/msix_vect mapping */
1910                 __vsi_queues_bind_intr(vsi, msix_vect,
1911                                        vsi->base_queue + i, 1,
1912                                        itr_idx);
1913                 if (!!record)
1914                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1915
1916                 msix_vect++;
1917                 nb_msix--;
1918         }
1919 }
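
/*
 * Mapping example for the bind loop above (illustrative): with
 * nb_used_qps = 4 and nb_msix = 2, queue 0 is bound 1:1 to the first
 * vector; the remaining three queues are then chained onto the second
 * vector in a single __vsi_queues_bind_intr() call. With
 * nb_msix >= nb_used_qps the mapping stays strictly 1:1.
 */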
1920
1921 static void
1922 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1923 {
1924         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1925         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1926         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1927         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1928         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1929         uint16_t msix_intr, i;
1930
1931         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1932                 for (i = 0; i < vsi->nb_msix; i++) {
1933                         msix_intr = vsi->msix_intr + i;
1934                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1935                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1936                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1937                                 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1938                 }
1939         else
1940                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1941                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1942                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1943                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1944
1945         I40E_WRITE_FLUSH(hw);
1946 }
1947
1948 static void
1949 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1950 {
1951         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1952         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1953         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1954         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1955         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1956         uint16_t msix_intr, i;
1957
1958         if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1959                 for (i = 0; i < vsi->nb_msix; i++) {
1960                         msix_intr = vsi->msix_intr + i;
1961                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1962                                        I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1963                 }
1964         else
1965                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1966                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1967
1968         I40E_WRITE_FLUSH(hw);
1969 }
1970
1971 static inline uint8_t
1972 i40e_parse_link_speeds(uint16_t link_speeds)
1973 {
1974         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1975
1976         if (link_speeds & ETH_LINK_SPEED_40G)
1977                 link_speed |= I40E_LINK_SPEED_40GB;
1978         if (link_speeds & ETH_LINK_SPEED_25G)
1979                 link_speed |= I40E_LINK_SPEED_25GB;
1980         if (link_speeds & ETH_LINK_SPEED_20G)
1981                 link_speed |= I40E_LINK_SPEED_20GB;
1982         if (link_speeds & ETH_LINK_SPEED_10G)
1983                 link_speed |= I40E_LINK_SPEED_10GB;
1984         if (link_speeds & ETH_LINK_SPEED_1G)
1985                 link_speed |= I40E_LINK_SPEED_1GB;
1986         if (link_speeds & ETH_LINK_SPEED_100M)
1987                 link_speed |= I40E_LINK_SPEED_100MB;
1988
1989         return link_speed;
1990 }
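
/*
 * Example: link_speeds == (ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G) maps to
 * (I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB), while 0
 * (ETH_LINK_SPEED_AUTONEG) yields I40E_LINK_SPEED_UNKNOWN and leaves speed
 * selection to autonegotiation.
 */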
1991
1992 static int
1993 i40e_phy_conf_link(struct i40e_hw *hw,
1994                    uint8_t abilities,
1995                    uint8_t force_speed,
1996                    bool is_up)
1997 {
1998         enum i40e_status_code status;
1999         struct i40e_aq_get_phy_abilities_resp phy_ab;
2000         struct i40e_aq_set_phy_config phy_conf;
2001         enum i40e_aq_phy_type cnt;
2002         uint32_t phy_type_mask = 0;
2003
        const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
                        I40E_AQ_PHY_FLAG_PAUSE_RX |
                        I40E_AQ_PHY_FLAG_LOW_POWER;
2008         const uint8_t advt = I40E_LINK_SPEED_40GB |
2009                         I40E_LINK_SPEED_25GB |
2010                         I40E_LINK_SPEED_10GB |
2011                         I40E_LINK_SPEED_1GB |
2012                         I40E_LINK_SPEED_100MB;
2013         int ret = -ENOTSUP;
2014
2016         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2017                                               NULL);
2018         if (status)
2019                 return ret;
2020
2021         /* If link already up, no need to set up again */
2022         if (is_up && phy_ab.phy_type != 0)
2023                 return I40E_SUCCESS;
2024
2025         memset(&phy_conf, 0, sizeof(phy_conf));
2026
2027         /* bits 0-2 use the values from get_phy_abilities_resp */
2028         abilities &= ~mask;
2029         abilities |= phy_ab.abilities & mask;
2030
        /* update abilities and speed */
2032         if (abilities & I40E_AQ_PHY_AN_ENABLED)
2033                 phy_conf.link_speed = advt;
2034         else
2035                 phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
2036
2037         phy_conf.abilities = abilities;
2038
2041         /* To enable link, phy_type mask needs to include each type */
2042         for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
2043                 phy_type_mask |= 1 << cnt;
2044
2045         /* use get_phy_abilities_resp value for the rest */
2046         phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2047         phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2048                 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2049                 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2050         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2051         phy_conf.eee_capability = phy_ab.eee_capability;
2052         phy_conf.eeer = phy_ab.eeer_val;
2053         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2054
2055         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2056                     phy_ab.abilities, phy_ab.link_speed);
2057         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2058                     phy_conf.abilities, phy_conf.link_speed);
2059
2060         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2061         if (status)
2062                 return ret;
2063
2064         return I40E_SUCCESS;
2065 }
2066
2067 static int
2068 i40e_apply_link_speed(struct rte_eth_dev *dev)
2069 {
2070         uint8_t speed;
2071         uint8_t abilities = 0;
2072         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2073         struct rte_eth_conf *conf = &dev->data->dev_conf;
2074
2075         speed = i40e_parse_link_speeds(conf->link_speeds);
2076         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2077         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
2078                 abilities |= I40E_AQ_PHY_AN_ENABLED;
2079         abilities |= I40E_AQ_PHY_LINK_ENABLED;
2080
2081         return i40e_phy_conf_link(hw, abilities, speed, true);
2082 }
2083
2084 static int
2085 i40e_dev_start(struct rte_eth_dev *dev)
2086 {
2087         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2088         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2089         struct i40e_vsi *main_vsi = pf->main_vsi;
2090         int ret, i;
2091         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2092         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2093         uint32_t intr_vector = 0;
2094         struct i40e_vsi *vsi;
2095
2096         hw->adapter_stopped = 0;
2097
2098         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2099                 PMD_INIT_LOG(ERR,
2100                 "Invalid link_speeds for port %u, autonegotiation disabled",
2101                               dev->data->port_id);
2102                 return -EINVAL;
2103         }
2104
2105         rte_intr_disable(intr_handle);
2106
2107         if ((rte_intr_cap_multiple(intr_handle) ||
2108              !RTE_ETH_DEV_SRIOV(dev).active) &&
2109             dev->data->dev_conf.intr_conf.rxq != 0) {
2110                 intr_vector = dev->data->nb_rx_queues;
2111                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2112                 if (ret)
2113                         return ret;
2114         }
2115
2116         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2117                 intr_handle->intr_vec =
2118                         rte_zmalloc("intr_vec",
2119                                     dev->data->nb_rx_queues * sizeof(int),
2120                                     0);
2121                 if (!intr_handle->intr_vec) {
2122                         PMD_INIT_LOG(ERR,
2123                                 "Failed to allocate %d rx_queues intr_vec",
2124                                 dev->data->nb_rx_queues);
2125                         return -ENOMEM;
2126                 }
2127         }
2128
2129         /* Initialize VSI */
2130         ret = i40e_dev_rxtx_init(pf);
2131         if (ret != I40E_SUCCESS) {
2132                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2133                 goto err_up;
2134         }
2135
2136         /* Map queues with MSIX interrupt */
2137         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2138                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2139         i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2140         i40e_vsi_enable_queues_intr(main_vsi);
2141
2142         /* Map VMDQ VSI queues with MSIX interrupt */
2143         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2144                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2145                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2146                                           I40E_ITR_INDEX_DEFAULT);
2147                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2148         }
2149
2150         /* enable FDIR MSIX interrupt */
2151         if (pf->fdir.fdir_vsi) {
2152                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2153                                           I40E_ITR_INDEX_NONE);
2154                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2155         }
2156
2157         /* Enable all queues which have been configured */
2158         ret = i40e_dev_switch_queues(pf, TRUE);
2159         if (ret != I40E_SUCCESS) {
2160                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2161                 goto err_up;
2162         }
2163
2164         /* Enable receiving broadcast packets */
2165         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2166         if (ret != I40E_SUCCESS)
2167                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2168
2169         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2170                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2171                                                 true, NULL);
2172                 if (ret != I40E_SUCCESS)
2173                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2174         }
2175
2176         /* Enable the VLAN promiscuous mode. */
2177         if (pf->vfs) {
2178                 for (i = 0; i < pf->vf_num; i++) {
2179                         vsi = pf->vfs[i].vsi;
2180                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2181                                                      true, NULL);
2182                 }
2183         }
2184
2185         /* Enable mac loopback mode */
2186         if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2187             dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2188                 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2189                 if (ret != I40E_SUCCESS) {
2190                         PMD_DRV_LOG(ERR, "fail to set loopback link");
2191                         goto err_up;
2192                 }
2193         }
2194
2195         /* Apply link configure */
2196         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
2197                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
2198                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
2199                                 ETH_LINK_SPEED_40G)) {
2200                 PMD_DRV_LOG(ERR, "Invalid link setting");
2201                 goto err_up;
2202         }
2203         ret = i40e_apply_link_speed(dev);
2204         if (I40E_SUCCESS != ret) {
2205                 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2206                 goto err_up;
2207         }
2208
2209         if (!rte_intr_allow_others(intr_handle)) {
2210                 rte_intr_callback_unregister(intr_handle,
2211                                              i40e_dev_interrupt_handler,
2212                                              (void *)dev);
2213                 /* configure and enable device interrupt */
2214                 i40e_pf_config_irq0(hw, FALSE);
2215                 i40e_pf_enable_irq0(hw);
2216
2217                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2218                         PMD_INIT_LOG(INFO,
                                "LSC won't be enabled: no interrupt multiplexing");
2220         } else {
2221                 ret = i40e_aq_set_phy_int_mask(hw,
2222                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2223                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2224                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2225                 if (ret != I40E_SUCCESS)
2226                         PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2227
                /* Call get_link_info aq command to enable/disable LSE */
2229                 i40e_dev_link_update(dev, 0);
2230         }
2231
2232         /* enable uio intr after callback register */
2233         rte_intr_enable(intr_handle);
2234
2235         i40e_filter_restore(pf);
2236
2237         if (pf->tm_conf.root && !pf->tm_conf.committed)
2238                 PMD_DRV_LOG(WARNING,
2239                             "please call hierarchy_commit() "
2240                             "before starting the port");
2241
2242         return I40E_SUCCESS;
2243
2244 err_up:
2245         i40e_dev_switch_queues(pf, FALSE);
2246         i40e_dev_clear_queues(dev);
2247
2248         return ret;
2249 }
2250
2251 static void
2252 i40e_dev_stop(struct rte_eth_dev *dev)
2253 {
2254         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2255         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2256         struct i40e_vsi *main_vsi = pf->main_vsi;
2257         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2258         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2259         int i;
2260
2261         if (hw->adapter_stopped == 1)
2262                 return;
2263         /* Disable all queues */
2264         i40e_dev_switch_queues(pf, FALSE);
2265
2266         /* un-map queues with interrupt registers */
2267         i40e_vsi_disable_queues_intr(main_vsi);
2268         i40e_vsi_queues_unbind_intr(main_vsi);
2269
2270         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2271                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2272                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2273         }
2274
2275         if (pf->fdir.fdir_vsi) {
2276                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2277                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2278         }
2279         /* Clear all queues and release memory */
2280         i40e_dev_clear_queues(dev);
2281
2282         /* Set link down */
2283         i40e_dev_set_link_down(dev);
2284
2285         if (!rte_intr_allow_others(intr_handle))
2286                 /* resume to the default handler */
2287                 rte_intr_callback_register(intr_handle,
2288                                            i40e_dev_interrupt_handler,
2289                                            (void *)dev);
2290
2291         /* Clean datapath event and queue/vec mapping */
2292         rte_intr_efd_disable(intr_handle);
2293         if (intr_handle->intr_vec) {
2294                 rte_free(intr_handle->intr_vec);
2295                 intr_handle->intr_vec = NULL;
2296         }
2297
2298         /* reset hierarchy commit */
2299         pf->tm_conf.committed = false;
2300
2301         hw->adapter_stopped = 1;
2302 }
2303
2304 static void
2305 i40e_dev_close(struct rte_eth_dev *dev)
2306 {
2307         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2308         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2309         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2310         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2311         struct i40e_mirror_rule *p_mirror;
2312         uint32_t reg;
2313         int i;
2314         int ret;
2315
2316         PMD_INIT_FUNC_TRACE();
2317
2318         i40e_dev_stop(dev);
2319
2320         /* Remove all mirror rules */
2321         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2322                 ret = i40e_aq_del_mirror_rule(hw,
2323                                               pf->main_vsi->veb->seid,
2324                                               p_mirror->rule_type,
2325                                               p_mirror->entries,
2326                                               p_mirror->num_entries,
2327                                               p_mirror->id);
2328                 if (ret < 0)
2329                         PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2330                                     "status = %d, aq_err = %d.", ret,
2331                                     hw->aq.asq_last_status);
2332
2333                 /* remove mirror software resource anyway */
2334                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2335                 rte_free(p_mirror);
2336                 pf->nb_mirror_rule--;
2337         }
2338
2339         i40e_dev_free_queues(dev);
2340
2341         /* Disable interrupt */
2342         i40e_pf_disable_irq0(hw);
2343         rte_intr_disable(intr_handle);
2344
2345         /* shutdown and destroy the HMC */
2346         i40e_shutdown_lan_hmc(hw);
2347
2348         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2349                 i40e_vsi_release(pf->vmdq[i].vsi);
2350                 pf->vmdq[i].vsi = NULL;
2351         }
2352         rte_free(pf->vmdq);
2353         pf->vmdq = NULL;
2354
2355         /* release all the existing VSIs and VEBs */
2356         i40e_fdir_teardown(pf);
2357         i40e_vsi_release(pf->main_vsi);
2358
2359         /* shutdown the adminq */
2360         i40e_aq_queue_shutdown(hw, true);
2361         i40e_shutdown_adminq(hw);
2362
2363         i40e_res_pool_destroy(&pf->qp_pool);
2364         i40e_res_pool_destroy(&pf->msix_pool);
2365
2366         /* Disable flexible payload in global configuration */
2367         if (!pf->support_multi_driver)
2368                 i40e_flex_payload_reg_set_default(hw);
2369
2370         /* force a PF reset to clean anything leftover */
2371         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2372         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2373                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2374         I40E_WRITE_FLUSH(hw);
2375 }
2376
2377 /*
2378  * Reset PF device only to re-initialize resources in PMD layer
2379  */
2380 static int
2381 i40e_dev_reset(struct rte_eth_dev *dev)
2382 {
2383         int ret;
2384
        /* When a DPDK PMD PF begins to reset the PF port, it should notify
         * all of its VFs so that they stay aligned with it. The detailed
         * notification mechanism is PMD-specific. For the i40e PF it is
         * rather complex, so to avoid unexpected behavior in VFs, resetting
         * a PF with SR-IOV activated is currently not supported. It may be
         * supported later.
         */
2391         if (dev->data->sriov.active)
2392                 return -ENOTSUP;
2393
2394         ret = eth_i40e_dev_uninit(dev);
2395         if (ret)
2396                 return ret;
2397
2398         ret = eth_i40e_dev_init(dev, NULL);
2399
2400         return ret;
2401 }
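
/*
 * This callback is reached through the generic ethdev API. A minimal
 * application-side sequence (standard API; error handling elided) is:
 *
 *   if (rte_eth_dev_reset(port_id) == 0) {
 *           rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *           ...
 *           rte_eth_dev_start(port_id);
 *   }
 */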
2402
2403 static void
2404 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2405 {
2406         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2407         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2408         struct i40e_vsi *vsi = pf->main_vsi;
2409         int status;
2410
2411         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2412                                                      true, NULL, true);
2413         if (status != I40E_SUCCESS)
2414                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2415
2416         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2417                                                         TRUE, NULL);
2418         if (status != I40E_SUCCESS)
2419                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2420
2421 }
2422
2423 static void
2424 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2425 {
2426         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2427         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2428         struct i40e_vsi *vsi = pf->main_vsi;
2429         int status;
2430
2431         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2432                                                      false, NULL, true);
2433         if (status != I40E_SUCCESS)
2434                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2435
2436         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2437                                                         false, NULL);
2438         if (status != I40E_SUCCESS)
2439                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2440 }
2441
2442 static void
2443 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2444 {
2445         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2446         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2447         struct i40e_vsi *vsi = pf->main_vsi;
2448         int ret;
2449
2450         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2451         if (ret != I40E_SUCCESS)
2452                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2453 }
2454
2455 static void
2456 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2457 {
2458         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2459         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2460         struct i40e_vsi *vsi = pf->main_vsi;
2461         int ret;
2462
2463         if (dev->data->promiscuous == 1)
2464                 return; /* must remain in all_multicast mode */
2465
2466         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2467                                 vsi->seid, FALSE, NULL);
2468         if (ret != I40E_SUCCESS)
2469                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2470 }
2471
2472 /*
2473  * Set device link up.
2474  */
2475 static int
2476 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2477 {
2478         /* re-apply link speed setting */
2479         return i40e_apply_link_speed(dev);
2480 }
2481
2482 /*
2483  * Set device link down.
2484  */
2485 static int
2486 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2487 {
2488         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2489         uint8_t abilities = 0;
2490         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2491
2492         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2493         return i40e_phy_conf_link(hw, abilities, speed, false);
2494 }
2495
2496 static __rte_always_inline void
2497 update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
2498 {
/* Link status registers and values */
2500 #define I40E_PRTMAC_LINKSTA             0x001E2420
2501 #define I40E_REG_LINK_UP                0x40000080
2502 #define I40E_PRTMAC_MACC                0x001E24E0
2503 #define I40E_REG_MACC_25GB              0x00020000
2504 #define I40E_REG_SPEED_MASK             0x38000000
2505 #define I40E_REG_SPEED_100MB            0x00000000
2506 #define I40E_REG_SPEED_1GB              0x08000000
2507 #define I40E_REG_SPEED_10GB             0x10000000
2508 #define I40E_REG_SPEED_20GB             0x20000000
2509 #define I40E_REG_SPEED_25_40GB          0x18000000
2510         uint32_t link_speed;
2511         uint32_t reg_val;
2512
2513         reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2514         link_speed = reg_val & I40E_REG_SPEED_MASK;
2515         reg_val &= I40E_REG_LINK_UP;
2516         link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2517
        if (unlikely(link->link_status == 0))
                return;
2520
        /* Parse the link speed */
2522         switch (link_speed) {
2523         case I40E_REG_SPEED_100MB:
2524                 link->link_speed = ETH_SPEED_NUM_100M;
2525                 break;
2526         case I40E_REG_SPEED_1GB:
2527                 link->link_speed = ETH_SPEED_NUM_1G;
2528                 break;
2529         case I40E_REG_SPEED_10GB:
2530                 link->link_speed = ETH_SPEED_NUM_10G;
2531                 break;
2532         case I40E_REG_SPEED_20GB:
2533                 link->link_speed = ETH_SPEED_NUM_20G;
2534                 break;
2535         case I40E_REG_SPEED_25_40GB:
2536                 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2537
2538                 if (reg_val & I40E_REG_MACC_25GB)
2539                         link->link_speed = ETH_SPEED_NUM_25G;
2540                 else
2541                         link->link_speed = ETH_SPEED_NUM_40G;
2542
2543                 break;
2544         default:
2545                 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2546                 break;
2547         }
2548 }
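
/*
 * Decode example for the register path above (illustrative value): if
 * I40E_PRTMAC_LINKSTA reads 0x50000080, then
 * (val & I40E_REG_LINK_UP) == I40E_REG_LINK_UP, so the link is up, and
 * (val & I40E_REG_SPEED_MASK) == I40E_REG_SPEED_10GB, so the speed is
 * reported as ETH_SPEED_NUM_10G.
 */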
2549
2550 static __rte_always_inline void
2551 update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
2552         bool enable_lse)
2553 {
2554 #define CHECK_INTERVAL             100  /* 100ms */
2555 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2556         uint32_t rep_cnt = MAX_REPEAT_TIME;
2557         struct i40e_link_status link_status;
2558         int status;
2559
2560         memset(&link_status, 0, sizeof(link_status));
2563                 memset(&link_status, 0, sizeof(link_status));
2564
2565                 /* Get link status information from hardware */
2566                 status = i40e_aq_get_link_info(hw, enable_lse,
2567                                                 &link_status, NULL);
2568                 if (unlikely(status != I40E_SUCCESS)) {
2569                         link->link_speed = ETH_SPEED_NUM_100M;
2570                         link->link_duplex = ETH_LINK_FULL_DUPLEX;
2571                         PMD_DRV_LOG(ERR, "Failed to get link info");
2572                         return;
2573                 }
2574
2575                 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
                if (unlikely(link->link_status != 0))
                        break;
2578
2579                 rte_delay_ms(CHECK_INTERVAL);
2580         } while (--rep_cnt);
2581
2582         /* Parse the link status */
2583         switch (link_status.link_speed) {
2584         case I40E_LINK_SPEED_100MB:
2585                 link->link_speed = ETH_SPEED_NUM_100M;
2586                 break;
2587         case I40E_LINK_SPEED_1GB:
2588                 link->link_speed = ETH_SPEED_NUM_1G;
2589                 break;
2590         case I40E_LINK_SPEED_10GB:
2591                 link->link_speed = ETH_SPEED_NUM_10G;
2592                 break;
2593         case I40E_LINK_SPEED_20GB:
2594                 link->link_speed = ETH_SPEED_NUM_20G;
2595                 break;
2596         case I40E_LINK_SPEED_25GB:
2597                 link->link_speed = ETH_SPEED_NUM_25G;
2598                 break;
2599         case I40E_LINK_SPEED_40GB:
2600                 link->link_speed = ETH_SPEED_NUM_40G;
2601                 break;
2602         default:
2603                 link->link_speed = ETH_SPEED_NUM_100M;
2604                 break;
2605         }
2606 }
2607
2608 int
2609 i40e_dev_link_update(struct rte_eth_dev *dev,
2610                      int wait_to_complete)
2611 {
2612         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2613         struct rte_eth_link link;
2614         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2615         int ret;
2616
2617         memset(&link, 0, sizeof(link));
2618
2619         /* i40e uses full duplex only */
2620         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2621         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2622                         ETH_LINK_SPEED_FIXED);
2623
2624         if (!wait_to_complete)
2625                 update_link_no_wait(hw, &link);
2626         else
2627                 update_link_wait(hw, &link, enable_lse);
2628
2629         ret = rte_eth_linkstatus_set(dev, &link);
2630         i40e_notify_all_vfs_link_status(dev);
2631
2632         return ret;
2633 }
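
/*
 * Illustrative usage sketch (not part of this driver): applications reach
 * i40e_dev_link_update() only through the generic ethdev API, where
 * rte_eth_link_get() corresponds to wait_to_complete == 1 (the admin-queue
 * polling path above) and rte_eth_link_get_nowait() to wait_to_complete == 0
 * (the direct register read). Assuming a started port `port_id`:
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status)
 *		printf("Port %u is up at %u Mbps\n", port_id, link.link_speed);
 *	else
 *		printf("Port %u is down\n", port_id);
 */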
2634
2635 /* Get all the statistics of a VSI */
2636 void
2637 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2638 {
2639         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2640         struct i40e_eth_stats *nes = &vsi->eth_stats;
2641         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2642         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2643
2644         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2645                             vsi->offset_loaded, &oes->rx_bytes,
2646                             &nes->rx_bytes);
2647         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2648                             vsi->offset_loaded, &oes->rx_unicast,
2649                             &nes->rx_unicast);
2650         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2651                             vsi->offset_loaded, &oes->rx_multicast,
2652                             &nes->rx_multicast);
2653         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2654                             vsi->offset_loaded, &oes->rx_broadcast,
2655                             &nes->rx_broadcast);
2656         /* exclude CRC bytes */
2657         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2658                 nes->rx_broadcast) * ETHER_CRC_LEN;
2659
2660         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2661                             &oes->rx_discards, &nes->rx_discards);
2662         /* GLV_REPC not supported */
2663         /* GLV_RMPC not supported */
2664         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2665                             &oes->rx_unknown_protocol,
2666                             &nes->rx_unknown_protocol);
2667         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2668                             vsi->offset_loaded, &oes->tx_bytes,
2669                             &nes->tx_bytes);
2670         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2671                             vsi->offset_loaded, &oes->tx_unicast,
2672                             &nes->tx_unicast);
2673         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2674                             vsi->offset_loaded, &oes->tx_multicast,
2675                             &nes->tx_multicast);
2676         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2677                             vsi->offset_loaded,  &oes->tx_broadcast,
2678                             &nes->tx_broadcast);
2679         /* GLV_TDPC not supported */
2680         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2681                             &oes->tx_errors, &nes->tx_errors);
2682         vsi->offset_loaded = true;
2683
2684         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2685                     vsi->vsi_id);
2686         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2687         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2688         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2689         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2690         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2691         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2692                     nes->rx_unknown_protocol);
2693         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2694         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2695         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2696         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2697         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2698         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2699         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2700                     vsi->vsi_id);
2701 }
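
/*
 * For reference, a hedged sketch of the snapshot arithmetic performed by
 * the i40e_stat_update_48()/_32() helpers used above (the real helpers are
 * defined elsewhere in this file; names below are illustrative). The
 * hardware counters are free running and cannot be cleared, so each
 * statistic is reported relative to an offset captured on the first read,
 * with a correction when the 48-bit register wraps:
 *
 *	new_data = (uint64_t)rd(loreg) |
 *		   (((uint64_t)rd(hireg) & 0xFFFF) << 32);
 *	if (!offset_loaded)
 *		*offset = new_data;		// first read is the baseline
 *	if (new_data >= *offset)
 *		*stat = new_data - *offset;
 *	else					// register wrapped since snapshot
 *		*stat = new_data + ((uint64_t)1 << 48) - *offset;
 *	*stat &= 0xFFFFFFFFFFFFULL;		// keep 48 significant bits
 */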
2702
2703 static void
2704 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2705 {
2706         unsigned int i;
2707         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2708         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2709
2710         /* Get rx/tx bytes of internal transfer packets */
2711         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2712                         I40E_GLV_GORCL(hw->port),
2713                         pf->offset_loaded,
2714                         &pf->internal_stats_offset.rx_bytes,
2715                         &pf->internal_stats.rx_bytes);
2716
2717         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2718                         I40E_GLV_GOTCL(hw->port),
2719                         pf->offset_loaded,
2720                         &pf->internal_stats_offset.tx_bytes,
2721                         &pf->internal_stats.tx_bytes);
2722         /* Get total internal rx packet count */
2723         i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2724                             I40E_GLV_UPRCL(hw->port),
2725                             pf->offset_loaded,
2726                             &pf->internal_stats_offset.rx_unicast,
2727                             &pf->internal_stats.rx_unicast);
2728         i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2729                             I40E_GLV_MPRCL(hw->port),
2730                             pf->offset_loaded,
2731                             &pf->internal_stats_offset.rx_multicast,
2732                             &pf->internal_stats.rx_multicast);
2733         i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2734                             I40E_GLV_BPRCL(hw->port),
2735                             pf->offset_loaded,
2736                             &pf->internal_stats_offset.rx_broadcast,
2737                             &pf->internal_stats.rx_broadcast);
2738         /* Get total internal tx packet count */
2739         i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2740                             I40E_GLV_UPTCL(hw->port),
2741                             pf->offset_loaded,
2742                             &pf->internal_stats_offset.tx_unicast,
2743                             &pf->internal_stats.tx_unicast);
2744         i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2745                             I40E_GLV_MPTCL(hw->port),
2746                             pf->offset_loaded,
2747                             &pf->internal_stats_offset.tx_multicast,
2748                             &pf->internal_stats.tx_multicast);
2749         i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2750                             I40E_GLV_BPTCL(hw->port),
2751                             pf->offset_loaded,
2752                             &pf->internal_stats_offset.tx_broadcast,
2753                             &pf->internal_stats.tx_broadcast);
2754
2755         /* exclude CRC size */
2756         pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2757                 pf->internal_stats.rx_multicast +
2758                 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2759
2760         /* Get statistics of struct i40e_eth_stats */
2761         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2762                             I40E_GLPRT_GORCL(hw->port),
2763                             pf->offset_loaded, &os->eth.rx_bytes,
2764                             &ns->eth.rx_bytes);
2765         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2766                             I40E_GLPRT_UPRCL(hw->port),
2767                             pf->offset_loaded, &os->eth.rx_unicast,
2768                             &ns->eth.rx_unicast);
2769         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2770                             I40E_GLPRT_MPRCL(hw->port),
2771                             pf->offset_loaded, &os->eth.rx_multicast,
2772                             &ns->eth.rx_multicast);
2773         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2774                             I40E_GLPRT_BPRCL(hw->port),
2775                             pf->offset_loaded, &os->eth.rx_broadcast,
2776                             &ns->eth.rx_broadcast);
2777         /* Workaround: CRC size should not be included in byte statistics,
2778          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2779          */
2780         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2781                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2782
2783         /* Exclude internal rx bytes.
2784          * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated
2785          * before I40E_GLPRT_GORC[H/L], so there is a small window in which
2786          * the subtraction below could yield a negative value.
2787          * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and
2788          * I40E_GLV_BPRC[H/L].
2789          */
2789         if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2790                 ns->eth.rx_bytes = 0;
2791         else
2792                 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2793
2794         if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2795                 ns->eth.rx_unicast = 0;
2796         else
2797                 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2798
2799         if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2800                 ns->eth.rx_multicast = 0;
2801         else
2802                 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2803
2804         if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2805                 ns->eth.rx_broadcast = 0;
2806         else
2807                 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2808
2809         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2810                             pf->offset_loaded, &os->eth.rx_discards,
2811                             &ns->eth.rx_discards);
2812         /* GLPRT_REPC not supported */
2813         /* GLPRT_RMPC not supported */
2814         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2815                             pf->offset_loaded,
2816                             &os->eth.rx_unknown_protocol,
2817                             &ns->eth.rx_unknown_protocol);
2818         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2819                             I40E_GLPRT_GOTCL(hw->port),
2820                             pf->offset_loaded, &os->eth.tx_bytes,
2821                             &ns->eth.tx_bytes);
2822         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2823                             I40E_GLPRT_UPTCL(hw->port),
2824                             pf->offset_loaded, &os->eth.tx_unicast,
2825                             &ns->eth.tx_unicast);
2826         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2827                             I40E_GLPRT_MPTCL(hw->port),
2828                             pf->offset_loaded, &os->eth.tx_multicast,
2829                             &ns->eth.tx_multicast);
2830         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2831                             I40E_GLPRT_BPTCL(hw->port),
2832                             pf->offset_loaded, &os->eth.tx_broadcast,
2833                             &ns->eth.tx_broadcast);
2834         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2835                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2836
2837         /* Exclude internal tx bytes.
2838          * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated
2839          * before I40E_GLPRT_GOTC[H/L], so there is a small window in which
2840          * the subtraction below could yield a negative value.
2841          * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and
2842          * I40E_GLV_BPTC[H/L].
2843          */
2843         if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2844                 ns->eth.tx_bytes = 0;
2845         else
2846                 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2847
2848         if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2849                 ns->eth.tx_unicast = 0;
2850         else
2851                 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2852
2853         if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2854                 ns->eth.tx_multicast = 0;
2855         else
2856                 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2857
2858         if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2859                 ns->eth.tx_broadcast = 0;
2860         else
2861                 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2862
2863         /* GLPRT_TEPC not supported */
2864
2865         /* additional port specific stats */
2866         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2867                             pf->offset_loaded, &os->tx_dropped_link_down,
2868                             &ns->tx_dropped_link_down);
2869         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2870                             pf->offset_loaded, &os->crc_errors,
2871                             &ns->crc_errors);
2872         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2873                             pf->offset_loaded, &os->illegal_bytes,
2874                             &ns->illegal_bytes);
2875         /* GLPRT_ERRBC not supported */
2876         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2877                             pf->offset_loaded, &os->mac_local_faults,
2878                             &ns->mac_local_faults);
2879         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2880                             pf->offset_loaded, &os->mac_remote_faults,
2881                             &ns->mac_remote_faults);
2882         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2883                             pf->offset_loaded, &os->rx_length_errors,
2884                             &ns->rx_length_errors);
2885         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2886                             pf->offset_loaded, &os->link_xon_rx,
2887                             &ns->link_xon_rx);
2888         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2889                             pf->offset_loaded, &os->link_xoff_rx,
2890                             &ns->link_xoff_rx);
2891         for (i = 0; i < 8; i++) {
2892                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2893                                     pf->offset_loaded,
2894                                     &os->priority_xon_rx[i],
2895                                     &ns->priority_xon_rx[i]);
2896                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2897                                     pf->offset_loaded,
2898                                     &os->priority_xoff_rx[i],
2899                                     &ns->priority_xoff_rx[i]);
2900         }
2901         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2902                             pf->offset_loaded, &os->link_xon_tx,
2903                             &ns->link_xon_tx);
2904         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2905                             pf->offset_loaded, &os->link_xoff_tx,
2906                             &ns->link_xoff_tx);
2907         for (i = 0; i < 8; i++) {
2908                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2909                                     pf->offset_loaded,
2910                                     &os->priority_xon_tx[i],
2911                                     &ns->priority_xon_tx[i]);
2912                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2913                                     pf->offset_loaded,
2914                                     &os->priority_xoff_tx[i],
2915                                     &ns->priority_xoff_tx[i]);
2916                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2917                                     pf->offset_loaded,
2918                                     &os->priority_xon_2_xoff[i],
2919                                     &ns->priority_xon_2_xoff[i]);
2920         }
2921         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2922                             I40E_GLPRT_PRC64L(hw->port),
2923                             pf->offset_loaded, &os->rx_size_64,
2924                             &ns->rx_size_64);
2925         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2926                             I40E_GLPRT_PRC127L(hw->port),
2927                             pf->offset_loaded, &os->rx_size_127,
2928                             &ns->rx_size_127);
2929         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2930                             I40E_GLPRT_PRC255L(hw->port),
2931                             pf->offset_loaded, &os->rx_size_255,
2932                             &ns->rx_size_255);
2933         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2934                             I40E_GLPRT_PRC511L(hw->port),
2935                             pf->offset_loaded, &os->rx_size_511,
2936                             &ns->rx_size_511);
2937         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2938                             I40E_GLPRT_PRC1023L(hw->port),
2939                             pf->offset_loaded, &os->rx_size_1023,
2940                             &ns->rx_size_1023);
2941         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2942                             I40E_GLPRT_PRC1522L(hw->port),
2943                             pf->offset_loaded, &os->rx_size_1522,
2944                             &ns->rx_size_1522);
2945         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2946                             I40E_GLPRT_PRC9522L(hw->port),
2947                             pf->offset_loaded, &os->rx_size_big,
2948                             &ns->rx_size_big);
2949         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2950                             pf->offset_loaded, &os->rx_undersize,
2951                             &ns->rx_undersize);
2952         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2953                             pf->offset_loaded, &os->rx_fragments,
2954                             &ns->rx_fragments);
2955         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2956                             pf->offset_loaded, &os->rx_oversize,
2957                             &ns->rx_oversize);
2958         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2959                             pf->offset_loaded, &os->rx_jabber,
2960                             &ns->rx_jabber);
2961         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2962                             I40E_GLPRT_PTC64L(hw->port),
2963                             pf->offset_loaded, &os->tx_size_64,
2964                             &ns->tx_size_64);
2965         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2966                             I40E_GLPRT_PTC127L(hw->port),
2967                             pf->offset_loaded, &os->tx_size_127,
2968                             &ns->tx_size_127);
2969         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2970                             I40E_GLPRT_PTC255L(hw->port),
2971                             pf->offset_loaded, &os->tx_size_255,
2972                             &ns->tx_size_255);
2973         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2974                             I40E_GLPRT_PTC511L(hw->port),
2975                             pf->offset_loaded, &os->tx_size_511,
2976                             &ns->tx_size_511);
2977         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2978                             I40E_GLPRT_PTC1023L(hw->port),
2979                             pf->offset_loaded, &os->tx_size_1023,
2980                             &ns->tx_size_1023);
2981         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2982                             I40E_GLPRT_PTC1522L(hw->port),
2983                             pf->offset_loaded, &os->tx_size_1522,
2984                             &ns->tx_size_1522);
2985         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2986                             I40E_GLPRT_PTC9522L(hw->port),
2987                             pf->offset_loaded, &os->tx_size_big,
2988                             &ns->tx_size_big);
2989         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2990                            pf->offset_loaded,
2991                            &os->fd_sb_match, &ns->fd_sb_match);
2992         /* GLPRT_MSPDC not supported */
2993         /* GLPRT_XEC not supported */
2994
2995         pf->offset_loaded = true;
2996
2997         if (pf->main_vsi)
2998                 i40e_update_vsi_stats(pf->main_vsi);
2999 }
3000
3001 /* Get all statistics of a port */
3002 static int
3003 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3004 {
3005         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3006         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3007         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3008         unsigned i;
3009
3010         /* Read the hardware registers to refresh the values, then fill the struct */
3011         i40e_read_stats_registers(pf, hw);
3012
3013         stats->ipackets = ns->eth.rx_unicast +
3014                         ns->eth.rx_multicast +
3015                         ns->eth.rx_broadcast -
3016                         ns->eth.rx_discards -
3017                         pf->main_vsi->eth_stats.rx_discards;
3018         stats->opackets = ns->eth.tx_unicast +
3019                         ns->eth.tx_multicast +
3020                         ns->eth.tx_broadcast;
3021         stats->ibytes   = ns->eth.rx_bytes;
3022         stats->obytes   = ns->eth.tx_bytes;
3023         stats->oerrors  = ns->eth.tx_errors +
3024                         pf->main_vsi->eth_stats.tx_errors;
3025
3026         /* Rx Errors */
3027         stats->imissed  = ns->eth.rx_discards +
3028                         pf->main_vsi->eth_stats.rx_discards;
3029         stats->ierrors  = ns->crc_errors +
3030                         ns->rx_length_errors + ns->rx_undersize +
3031                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3032
3033         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3034         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3035         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3036         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3037         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3038         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3039         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3040                     ns->eth.rx_unknown_protocol);
3041         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3042         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3043         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3044         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3045         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3046         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3047
3048         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3049                     ns->tx_dropped_link_down);
3050         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3051         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3052                     ns->illegal_bytes);
3053         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3054         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3055                     ns->mac_local_faults);
3056         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3057                     ns->mac_remote_faults);
3058         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3059                     ns->rx_length_errors);
3060         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3061         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3062         for (i = 0; i < 8; i++) {
3063                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3064                                 i, ns->priority_xon_rx[i]);
3065                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3066                                 i, ns->priority_xoff_rx[i]);
3067         }
3068         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3069         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3070         for (i = 0; i < 8; i++) {
3071                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3072                                 i, ns->priority_xon_tx[i]);
3073                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3074                                 i, ns->priority_xoff_tx[i]);
3075                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3076                                 i, ns->priority_xon_2_xoff[i]);
3077         }
3078         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3079         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3080         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3081         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3082         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3083         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3084         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3085         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3086         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3087         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3088         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3089         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3090         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3091         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3092         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3093         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3094         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3095         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3096         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3097                         ns->mac_short_packet_dropped);
3098         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3099                     ns->checksum_error);
3100         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3101         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3102         return 0;
3103 }
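
/*
 * Illustrative usage sketch (not part of this driver): the callback above
 * backs the generic statistics API, e.g.:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %" PRIu64 " pkts, %" PRIu64 " bytes, "
 *		       "%" PRIu64 " missed\n",
 *		       stats.ipackets, stats.ibytes, stats.imissed);
 */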
3104
3105 /* Reset the statistics */
3106 static void
3107 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3108 {
3109         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3110         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3111
3112         /* Mark PF and VSI stats to update the offset, aka "reset" */
3113         pf->offset_loaded = false;
3114         if (pf->main_vsi)
3115                 pf->main_vsi->offset_loaded = false;
3116
3117         /* read the stats, reading current register values into offset */
3118         i40e_read_stats_registers(pf, hw);
3119 }
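
/*
 * Note on the reset scheme above: the port counters cannot be written, so
 * "reset" only re-snapshots the current register values into the offsets;
 * every statistic then reads back as zero-based again. Applications trigger
 * this via rte_eth_stats_reset(port_id).
 */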
3120
3121 static uint32_t
3122 i40e_xstats_calc_num(void)
3123 {
3124         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3125                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3126                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3127 }
3128
3129 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3130                                      struct rte_eth_xstat_name *xstats_names,
3131                                      __rte_unused unsigned limit)
3132 {
3133         unsigned count = 0;
3134         unsigned i, prio;
3135
3136         if (xstats_names == NULL)
3137                 return i40e_xstats_calc_num();
3138
3139         /* Note: limit checked in rte_eth_xstats_get_names() */
3140
3141         /* Get stats from i40e_eth_stats struct */
3142         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3143                 snprintf(xstats_names[count].name,
3144                          sizeof(xstats_names[count].name),
3145                          "%s", rte_i40e_stats_strings[i].name);
3146                 count++;
3147         }
3148
3149         /* Get individual stats from i40e_hw_port struct */
3150         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3151                 snprintf(xstats_names[count].name,
3152                         sizeof(xstats_names[count].name),
3153                          "%s", rte_i40e_hw_port_strings[i].name);
3154                 count++;
3155         }
3156
3157         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3158                 for (prio = 0; prio < 8; prio++) {
3159                         snprintf(xstats_names[count].name,
3160                                  sizeof(xstats_names[count].name),
3161                                  "rx_priority%u_%s", prio,
3162                                  rte_i40e_rxq_prio_strings[i].name);
3163                         count++;
3164                 }
3165         }
3166
3167         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3168                 for (prio = 0; prio < 8; prio++) {
3169                         snprintf(xstats_names[count].name,
3170                                  sizeof(xstats_names[count].name),
3171                                  "tx_priority%u_%s", prio,
3172                                  rte_i40e_txq_prio_strings[i].name);
3173                         count++;
3174                 }
3175         }
3176         return count;
3177 }
3178
3179 static int
3180 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3181                     unsigned n)
3182 {
3183         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3184         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3185         unsigned i, count, prio;
3186         struct i40e_hw_port_stats *hw_stats = &pf->stats;
3187
3188         count = i40e_xstats_calc_num();
3189         if (n < count)
3190                 return count;
3191
3192         i40e_read_stats_registers(pf, hw);
3193
3194         if (xstats == NULL)
3195                 return 0;
3196
3197         count = 0;
3198
3199         /* Get stats from i40e_eth_stats struct */
3200         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3201                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3202                         rte_i40e_stats_strings[i].offset);
3203                 xstats[count].id = count;
3204                 count++;
3205         }
3206
3207         /* Get individual stats from i40e_hw_port struct */
3208         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3209                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3210                         rte_i40e_hw_port_strings[i].offset);
3211                 xstats[count].id = count;
3212                 count++;
3213         }
3214
3215         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3216                 for (prio = 0; prio < 8; prio++) {
3217                         xstats[count].value =
3218                                 *(uint64_t *)(((char *)hw_stats) +
3219                                 rte_i40e_rxq_prio_strings[i].offset +
3220                                 (sizeof(uint64_t) * prio));
3221                         xstats[count].id = count;
3222                         count++;
3223                 }
3224         }
3225
3226         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3227                 for (prio = 0; prio < 8; prio++) {
3228                         xstats[count].value =
3229                                 *(uint64_t *)(((char *)hw_stats) +
3230                                 rte_i40e_txq_prio_strings[i].offset +
3231                                 (sizeof(uint64_t) * prio));
3232                         xstats[count].id = count;
3233                         count++;
3234                 }
3235         }
3236
3237         return count;
3238 }
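
/*
 * Illustrative usage sketch (not part of this driver): the two callbacks
 * above implement the usual size-query-then-fill xstats contract, e.g.:
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *	if (xs != NULL && names != NULL &&
 *	    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, xs, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n", names[i].name, xs[i].value);
 *	free(xs);
 *	free(names);
 */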
3239
3240 static int
3241 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3242                                  __rte_unused uint16_t queue_id,
3243                                  __rte_unused uint8_t stat_idx,
3244                                  __rte_unused uint8_t is_rx)
3245 {
3246         PMD_INIT_FUNC_TRACE();
3247
3248         return -ENOSYS;
3249 }
3250
3251 static int
3252 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3253 {
3254         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3255         u32 full_ver;
3256         u8 ver, patch;
3257         u16 build;
3258         int ret;
3259
3260         full_ver = hw->nvm.oem_ver;
3261         ver = (u8)(full_ver >> 24);
3262         build = (u16)((full_ver >> 8) & 0xffff);
3263         patch = (u8)(full_ver & 0xff);
3264
3265         ret = snprintf(fw_version, fw_size,
3266                  "%d.%d%d 0x%08x %d.%d.%d",
3267                  ((hw->nvm.version >> 12) & 0xf),
3268                  ((hw->nvm.version >> 4) & 0xff),
3269                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
3270                  ver, build, patch);
3271
3272         ret += 1; /* add the size of '\0' */
3273         if (fw_size < (u32)ret)
3274                 return ret;
3275         else
3276                 return 0;
3277 }
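
/*
 * Illustrative usage sketch (not part of this driver): reached through
 * rte_eth_dev_fw_version_get(); a return value > 0 is the buffer size that
 * would have been required, including the trailing '\0':
 *
 *	char fw[64];
 *
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("firmware: %s\n", fw);
 */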
3278
3279 static void
3280 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3281 {
3282         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3283         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3284         struct i40e_vsi *vsi = pf->main_vsi;
3285         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3286
3287         dev_info->max_rx_queues = vsi->nb_qps;
3288         dev_info->max_tx_queues = vsi->nb_qps;
3289         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3290         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3291         dev_info->max_mac_addrs = vsi->max_macaddrs;
3292         dev_info->max_vfs = pci_dev->max_vfs;
3293         dev_info->rx_queue_offload_capa = 0;
3294         dev_info->rx_offload_capa =
3295                 DEV_RX_OFFLOAD_VLAN_STRIP |
3296                 DEV_RX_OFFLOAD_QINQ_STRIP |
3297                 DEV_RX_OFFLOAD_IPV4_CKSUM |
3298                 DEV_RX_OFFLOAD_UDP_CKSUM |
3299                 DEV_RX_OFFLOAD_TCP_CKSUM |
3300                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3301                 DEV_RX_OFFLOAD_CRC_STRIP |
3302                 DEV_RX_OFFLOAD_VLAN_EXTEND |
3303                 DEV_RX_OFFLOAD_VLAN_FILTER;
3304
3305         dev_info->tx_queue_offload_capa = 0;
3306         dev_info->tx_offload_capa =
3307                 DEV_TX_OFFLOAD_VLAN_INSERT |
3308                 DEV_TX_OFFLOAD_QINQ_INSERT |
3309                 DEV_TX_OFFLOAD_IPV4_CKSUM |
3310                 DEV_TX_OFFLOAD_UDP_CKSUM |
3311                 DEV_TX_OFFLOAD_TCP_CKSUM |
3312                 DEV_TX_OFFLOAD_SCTP_CKSUM |
3313                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3314                 DEV_TX_OFFLOAD_TCP_TSO |
3315                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3316                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3317                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3318                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
3319         dev_info->dev_capa =
3320                 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3321                 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3322
3323         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3324                                                 sizeof(uint32_t);
3325         dev_info->reta_size = pf->hash_lut_size;
3326         dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3327
3328         dev_info->default_rxconf = (struct rte_eth_rxconf) {
3329                 .rx_thresh = {
3330                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
3331                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
3332                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
3333                 },
3334                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3335                 .rx_drop_en = 0,
3336                 .offloads = 0,
3337         };
3338
3339         dev_info->default_txconf = (struct rte_eth_txconf) {
3340                 .tx_thresh = {
3341                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
3342                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
3343                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
3344                 },
3345                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3346                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3347                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
3348                                 ETH_TXQ_FLAGS_NOOFFLOADS,
3349         };
3350
3351         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3352                 .nb_max = I40E_MAX_RING_DESC,
3353                 .nb_min = I40E_MIN_RING_DESC,
3354                 .nb_align = I40E_ALIGN_RING_DESC,
3355         };
3356
3357         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3358                 .nb_max = I40E_MAX_RING_DESC,
3359                 .nb_min = I40E_MIN_RING_DESC,
3360                 .nb_align = I40E_ALIGN_RING_DESC,
3361                 .nb_seg_max = I40E_TX_MAX_SEG,
3362                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3363         };
3364
3365         if (pf->flags & I40E_FLAG_VMDQ) {
3366                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3367                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3368                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3369                                                 pf->max_nb_vmdq_vsi;
3370                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3371                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3372                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3373         }
3374
3375         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3376                 /* For XL710 */
3377                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3378                 dev_info->default_rxportconf.nb_queues = 2;
3379                 dev_info->default_txportconf.nb_queues = 2;
3380                 if (dev->data->nb_rx_queues == 1)
3381                         dev_info->default_rxportconf.ring_size = 2048;
3382                 else
3383                         dev_info->default_rxportconf.ring_size = 1024;
3384                 if (dev->data->nb_tx_queues == 1)
3385                         dev_info->default_txportconf.ring_size = 1024;
3386                 else
3387                         dev_info->default_txportconf.ring_size = 512;
3388
3389         } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3390                 /* For XXV710 */
3391                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3392                 dev_info->default_rxportconf.nb_queues = 1;
3393                 dev_info->default_txportconf.nb_queues = 1;
3394                 dev_info->default_rxportconf.ring_size = 256;
3395                 dev_info->default_txportconf.ring_size = 256;
3396         } else {
3397                 /* For X710 */
3398                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3399                 dev_info->default_rxportconf.nb_queues = 1;
3400                 dev_info->default_txportconf.nb_queues = 1;
3401                 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3402                         dev_info->default_rxportconf.ring_size = 512;
3403                         dev_info->default_txportconf.ring_size = 256;
3404                 } else {
3405                         dev_info->default_rxportconf.ring_size = 256;
3406                         dev_info->default_txportconf.ring_size = 256;
3407                 }
3408         }
3409         dev_info->default_rxportconf.burst_size = 32;
3410         dev_info->default_txportconf.burst_size = 32;
3411 }
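
/*
 * Illustrative usage sketch (not part of this driver): applications are
 * expected to consult this capability report before configuring the port,
 * e.g. requesting VLAN stripping only when it is advertised:
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
 *		conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 */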
3412
3413 static int
3414 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3415 {
3416         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3417         struct i40e_vsi *vsi = pf->main_vsi;
3418         PMD_INIT_FUNC_TRACE();
3419
3420         if (on)
3421                 return i40e_vsi_add_vlan(vsi, vlan_id);
3422         else
3423                 return i40e_vsi_delete_vlan(vsi, vlan_id);
3424 }
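
/*
 * Illustrative usage sketch (not part of this driver): reached via the
 * generic API, e.g. to accept VLAN 100 on the main VSI (the
 * DEV_RX_OFFLOAD_VLAN_FILTER offload must be enabled for the filter to
 * take effect):
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 */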
3425
3426 static int
3427 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3428                                 enum rte_vlan_type vlan_type,
3429                                 uint16_t tpid, int qinq)
3430 {
3431         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3432         uint64_t reg_r = 0;
3433         uint64_t reg_w = 0;
3434         uint16_t reg_id = 3;
3435         int ret;
3436
3437         if (qinq) {
3438                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3439                         reg_id = 2;
3440         }
3441
3442         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3443                                           &reg_r, NULL);
3444         if (ret != I40E_SUCCESS) {
3445                 PMD_DRV_LOG(ERR,
3446                            "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3447                            reg_id);
3448                 return -EIO;
3449         }
3450         PMD_DRV_LOG(DEBUG,
3451                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3452                     reg_id, reg_r);
3453
3454         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3455         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3456         if (reg_r == reg_w) {
3457                 PMD_DRV_LOG(DEBUG, "No need to write");
3458                 return 0;
3459         }
3460
3461         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3462                                            reg_w, NULL);
3463         if (ret != I40E_SUCCESS) {
3464                 PMD_DRV_LOG(ERR,
3465                             "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3466                             reg_id);
3467                 return -EIO;
3468         }
3469         PMD_DRV_LOG(DEBUG,
3470                     "Global register 0x%08x is changed with value 0x%08x",
3471                     I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3472
3473         return 0;
3474 }
3475
3476 static int
3477 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3478                    enum rte_vlan_type vlan_type,
3479                    uint16_t tpid)
3480 {
3481         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3482         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3483         int qinq = dev->data->dev_conf.rxmode.offloads &
3484                    DEV_RX_OFFLOAD_VLAN_EXTEND;
3485         int ret = 0;
3486
3487         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3488              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3489             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3490                 PMD_DRV_LOG(ERR,
3491                             "Unsupported vlan type.");
3492                 return -EINVAL;
3493         }
3494
3495         if (pf->support_multi_driver) {
3496                 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3497                 return -ENOTSUP;
3498         }
3499
3500         /* Support for 802.1ad frames is added in NVM API 1.7 */
3501         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3502                 if (qinq) {
3503                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3504                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3505                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3506                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3507                 } else {
3508                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3509                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3510                 }
3511                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3512                 if (ret != I40E_SUCCESS) {
3513                         PMD_DRV_LOG(ERR,
3514                                     "Set switch config failed aq_err: %d",
3515                                     hw->aq.asq_last_status);
3516                         ret = -EIO;
3517                 }
3518         } else
3519                 /* If NVM API < 1.7, keep the register setting */
3520                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3521                                                       tpid, qinq);
3522         i40e_global_cfg_warning(I40E_WARNING_TPID);
3523
3524         return ret;
3525 }
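
/*
 * Illustrative usage sketch (not part of this driver): with VLAN extend
 * (QinQ) enabled in the Rx offloads, an application can switch the outer
 * TPID to 802.1ad through the generic API, which lands here:
 *
 *	rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER, 0x88a8);
 */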
3526
3527 static int
3528 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3529 {
3530         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3531         struct i40e_vsi *vsi = pf->main_vsi;
3532         struct rte_eth_rxmode *rxmode;
3533
3534         rxmode = &dev->data->dev_conf.rxmode;
3535         if (mask & ETH_VLAN_FILTER_MASK) {
3536                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3537                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3538                 else
3539                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3540         }
3541
3542         if (mask & ETH_VLAN_STRIP_MASK) {
3543                 /* Enable or disable VLAN stripping */
3544                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3545                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3546                 else
3547                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3548         }
3549
3550         if (mask & ETH_VLAN_EXTEND_MASK) {
3551                 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
3552                         i40e_vsi_config_double_vlan(vsi, TRUE);
3553                         /* Set global registers with default ethertype. */
3554                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3555                                            ETHER_TYPE_VLAN);
3556                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3557                                            ETHER_TYPE_VLAN);
3558                 } else
3559                         i40e_vsi_config_double_vlan(vsi, FALSE);
3561         }
3562
3563         return 0;
3564 }
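
/*
 * Illustrative usage sketch (not part of this driver): an application
 * changes these offloads at runtime with the generic mask-based call; the
 * ethdev layer updates dev_conf.rxmode.offloads and invokes the callback
 * above with the mask of items that changed:
 *
 *	rte_eth_dev_set_vlan_offload(port_id,
 *			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 */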
3565
3566 static void
3567 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3568                           __rte_unused uint16_t queue,
3569                           __rte_unused int on)
3570 {
3571         PMD_INIT_FUNC_TRACE();
3572 }
3573
3574 static int
3575 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3576 {
3577         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3578         struct i40e_vsi *vsi = pf->main_vsi;
3579         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3580         struct i40e_vsi_vlan_pvid_info info;
3581
3582         memset(&info, 0, sizeof(info));
3583         info.on = on;
3584         if (info.on)
3585                 info.config.pvid = pvid;
3586         else {
3587                 info.config.reject.tagged =
3588                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3589                 info.config.reject.untagged =
3590                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3591         }
3592
3593         return i40e_vsi_vlan_pvid_set(vsi, &info);
3594 }
3595
3596 static int
3597 i40e_dev_led_on(struct rte_eth_dev *dev)
3598 {
3599         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3600         uint32_t mode = i40e_led_get(hw);
3601
3602         if (mode == 0)
3603                 i40e_led_set(hw, 0xf, true); /* 0xf means led always on */
3604
3605         return 0;
3606 }
3607
3608 static int
3609 i40e_dev_led_off(struct rte_eth_dev *dev)
3610 {
3611         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3612         uint32_t mode = i40e_led_get(hw);
3613
3614         if (mode != 0)
3615                 i40e_led_set(hw, 0, false);
3616
3617         return 0;
3618 }
3619
3620 static int
3621 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3622 {
3623         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3624         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3625
3626         fc_conf->pause_time = pf->fc_conf.pause_time;
3627
3628         /* read out from registers, in case they were modified by another port */
3629         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3630                 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3631         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3632                 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3633
3634         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3635         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3636
3637         /* Return the current mode according to the actual setting */
3638         switch (hw->fc.current_mode) {
3639         case I40E_FC_FULL:
3640                 fc_conf->mode = RTE_FC_FULL;
3641                 break;
3642         case I40E_FC_TX_PAUSE:
3643                 fc_conf->mode = RTE_FC_TX_PAUSE;
3644                 break;
3645         case I40E_FC_RX_PAUSE:
3646                 fc_conf->mode = RTE_FC_RX_PAUSE;
3647                 break;
3648         case I40E_FC_NONE:
3649         default:
3650                 fc_conf->mode = RTE_FC_NONE;
3651         }
3652
3653         return 0;
3654 }
3655
3656 static int
3657 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3658 {
3659         uint32_t mflcn_reg, fctrl_reg, reg;
3660         uint32_t max_high_water;
3661         uint8_t i, aq_failure;
3662         int err;
3663         struct i40e_hw *hw;
3664         struct i40e_pf *pf;
3665         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3666                 [RTE_FC_NONE] = I40E_FC_NONE,
3667                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3668                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3669                 [RTE_FC_FULL] = I40E_FC_FULL
3670         };
3671
3672         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
3673
3674         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3675         if ((fc_conf->high_water > max_high_water) ||
3676                         (fc_conf->high_water < fc_conf->low_water)) {
3677                 PMD_INIT_LOG(ERR,
3678                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3679                         max_high_water);
3680                 return -EINVAL;
3681         }
3682
3683         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3684         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3685         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3686
3687         pf->fc_conf.pause_time = fc_conf->pause_time;
3688         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3689         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3690
3691         PMD_INIT_FUNC_TRACE();
3692
3693         /* All the link flow control related enable/disable register
3694          * configuration is handled by the F/W
3695          */
3696         err = i40e_set_fc(hw, &aq_failure, true);
3697         if (err < 0)
3698                 return -ENOSYS;
3699
3700         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3701                 /* Configure flow control refresh threshold,
3702                  * the value for stat_tx_pause_refresh_timer[8]
3703                  * is used for global pause operation.
3704                  */
3705
3706                 I40E_WRITE_REG(hw,
3707                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3708                                pf->fc_conf.pause_time);
3709
3710                 /* configure the timer value included in transmitted pause
3711                  * frame,
3712                  * the value for stat_tx_pause_quanta[8] is used for global
3713                  * pause operation
3714                  */
3715                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3716                                pf->fc_conf.pause_time);
3717
3718                 fctrl_reg = I40E_READ_REG(hw,
3719                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3720
3721                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3722                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3723                 else
3724                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3725
3726                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3727                                fctrl_reg);
3728         } else {
3729                 /* Configure pause time (2 TCs per register) */
3730                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3731                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3732                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3733
3734                 /* Configure flow control refresh threshold value */
3735                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3736                                pf->fc_conf.pause_time / 2);
3737
3738                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3739
3740                 /* Set or clear the MFLCN.PMCF and MFLCN.DPF bits
3741                  * depending on the configuration
3742                  */
3743                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3744                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3745                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3746                 } else {
3747                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3748                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3749                 }
3750
3751                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3752         }
3753
3754         if (!pf->support_multi_driver) {
3755                 /* Configure watermarks in both packet and byte units */
3756                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3757                                  (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3758                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3759                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3760                                   (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3761                                  << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3762                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3763                                   pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3764                                   << I40E_KILOSHIFT);
3765                 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3766                                    pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3767                                    << I40E_KILOSHIFT);
3768                 i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
3769         } else {
3770                 PMD_DRV_LOG(ERR,
3771                             "Watermark configuration is not supported in multi-driver mode.");
3772         }
3773
3774         I40E_WRITE_FLUSH(hw);
3775
3776         return 0;
3777 }
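
/* Illustrative sketch only (guarded out of the build): how an application
 * might request full link flow control through the generic ethdev API that
 * lands in i40e_flow_ctrl_set() above. The port id and watermark values are
 * assumptions for the example; watermarks are expressed in kilobytes.
 */
#ifdef I40E_DOC_EXAMPLE
static int
example_configure_flow_ctrl(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        fc_conf.mode = RTE_FC_FULL;     /* generate and honor pause frames */
        fc_conf.pause_time = 0xFFFF;    /* pause quanta put into TX frames */
        fc_conf.high_water = 512;       /* KB, must be <= RX packet buffer */
        fc_conf.low_water = 256;        /* KB, must be <= high_water */
        fc_conf.mac_ctrl_frame_fwd = 0; /* do not forward MAC control frames */

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif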
3778
3779 static int
3780 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3781                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3782 {
3783         PMD_INIT_FUNC_TRACE();
3784
3785         return -ENOSYS;
3786 }
3787
3788 /* Add a MAC address, and update filters */
3789 static int
3790 i40e_macaddr_add(struct rte_eth_dev *dev,
3791                  struct ether_addr *mac_addr,
3792                  __rte_unused uint32_t index,
3793                  uint32_t pool)
3794 {
3795         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3796         struct i40e_mac_filter_info mac_filter;
3797         struct i40e_vsi *vsi;
3798         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
3799         int ret;
3800
3801         /* If VMDQ not enabled or configured, return */
3802         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3803                           !pf->nb_cfg_vmdq_vsi)) {
3804                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3805                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3806                         pool);
3807                 return -ENOTSUP;
3808         }
3809
3810         if (pool > pf->nb_cfg_vmdq_vsi) {
3811                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3812                                 pool, pf->nb_cfg_vmdq_vsi);
3813                 return -EINVAL;
3814         }
3815
3816         rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3817         if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3818                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3819         else
3820                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3821
3822         if (pool == 0)
3823                 vsi = pf->main_vsi;
3824         else
3825                 vsi = pf->vmdq[pool - 1].vsi;
3826
3827         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3828         if (ret != I40E_SUCCESS) {
3829                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3830                 return -ENODEV;
3831         }
3832         return 0;
3833 }
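
/* Illustrative sketch only (guarded out of the build): adding a unicast MAC
 * address to VMDq pool 1 through the generic API that is routed to
 * i40e_macaddr_add() above. The port id and address are assumptions.
 */
#ifdef I40E_DOC_EXAMPLE
static int
example_add_mac_to_pool(uint16_t port_id)
{
        struct ether_addr addr = {
                .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };

        /* pool 0 is the main VSI; pools >= 1 require configured VMDq VSIs */
        return rte_eth_dev_mac_addr_add(port_id, &addr, 1);
}
#endif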
3834
3835 /* Remove a MAC address, and update filters */
3836 static void
3837 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3838 {
3839         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3840         struct i40e_vsi *vsi;
3841         struct rte_eth_dev_data *data = dev->data;
3842         struct ether_addr *macaddr;
3843         int ret;
3844         uint32_t i;
3845         uint64_t pool_sel;
3846
3847         macaddr = &(data->mac_addrs[index]);
3848
3849         pool_sel = dev->data->mac_pool_sel[index];
3850
3851         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3852                 if (pool_sel & (1ULL << i)) {
3853                         if (i == 0)
3854                                 vsi = pf->main_vsi;
3855                         else {
3856                                 /* No VMDQ pool enabled or configured */
3857                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3858                                         (i > pf->nb_cfg_vmdq_vsi)) {
3859                                         PMD_DRV_LOG(ERR,
3860                                                 "No VMDQ pool enabled/configured");
3861                                         return;
3862                                 }
3863                                 vsi = pf->vmdq[i - 1].vsi;
3864                         }
3865                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3866
3867                         if (ret) {
3868                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3869                                 return;
3870                         }
3871                 }
3872         }
3873 }
3874
3875 /* Set perfect match or hash match of MAC and VLAN for a VF */
3876 static int
3877 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3878                  struct rte_eth_mac_filter *filter,
3879                  bool add)
3880 {
3881         struct i40e_hw *hw;
3882         struct i40e_mac_filter_info mac_filter;
3883         struct ether_addr old_mac;
3884         struct ether_addr *new_mac;
3885         struct i40e_pf_vf *vf = NULL;
3886         uint16_t vf_id;
3887         int ret;
3888
3889         if (pf == NULL) {
3890                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3891                 return -EINVAL;
3892         }
3893         hw = I40E_PF_TO_HW(pf);
3894
3895         if (filter == NULL) {
3896                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3897                 return -EINVAL;
3898         }
3899
3900         new_mac = &filter->mac_addr;
3901
3902         if (is_zero_ether_addr(new_mac)) {
3903                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3904                 return -EINVAL;
3905         }
3906
3907         vf_id = filter->dst_id;
3908
3909         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3910                 PMD_DRV_LOG(ERR, "Invalid argument.");
3911                 return -EINVAL;
3912         }
3913         vf = &pf->vfs[vf_id];
3914
3915         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3916                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3917                 return -EINVAL;
3918         }
3919
3920         if (add) {
3921                 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3922                 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3923                                 ETHER_ADDR_LEN);
3924                 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3925                                  ETHER_ADDR_LEN);
3926
3927                 mac_filter.filter_type = filter->filter_type;
3928                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3929                 if (ret != I40E_SUCCESS) {
3930                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3931                         return -1;
3932                 }
3933                 ether_addr_copy(new_mac, &pf->dev_addr);
3934         } else {
3935                 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3936                                 ETHER_ADDR_LEN);
3937                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3938                 if (ret != I40E_SUCCESS) {
3939                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3940                         return -1;
3941                 }
3942
3943                 /* Clear device address as it has been removed */
3944                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3945                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3946         }
3947
3948         return 0;
3949 }
3950
3951 /* MAC filter handle */
3952 static int
3953 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3954                 void *arg)
3955 {
3956         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3957         struct rte_eth_mac_filter *filter;
3958         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3959         int ret = I40E_NOT_SUPPORTED;
3960
3961         filter = (struct rte_eth_mac_filter *)(arg);
3962
3963         switch (filter_op) {
3964         case RTE_ETH_FILTER_NOP:
3965                 ret = I40E_SUCCESS;
3966                 break;
3967         case RTE_ETH_FILTER_ADD:
3968                 i40e_pf_disable_irq0(hw);
3969                 if (filter->is_vf)
3970                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3971                 i40e_pf_enable_irq0(hw);
3972                 break;
3973         case RTE_ETH_FILTER_DELETE:
3974                 i40e_pf_disable_irq0(hw);
3975                 if (filter->is_vf)
3976                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3977                 i40e_pf_enable_irq0(hw);
3978                 break;
3979         default:
3980                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3981                 ret = I40E_ERR_PARAM;
3982                 break;
3983         }
3984
3985         return ret;
3986 }
3987
3988 static int
3989 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3990 {
3991         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3992         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3993         uint32_t reg;
3994         int ret;
3995
3996         if (!lut)
3997                 return -EINVAL;
3998
3999         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4000                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
4001                                           lut, lut_size);
4002                 if (ret) {
4003                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4004                         return ret;
4005                 }
4006         } else {
4007                 uint32_t *lut_dw = (uint32_t *)lut;
4008                 uint16_t i, lut_size_dw = lut_size / 4;
4009
4010                 if (vsi->type == I40E_VSI_SRIOV) {
4011                         for (i = 0; i < lut_size_dw; i++) {
4012                                 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4013                                 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4014                         }
4015                 } else {
4016                         for (i = 0; i < lut_size_dw; i++)
4017                                 lut_dw[i] = I40E_READ_REG(hw,
4018                                                           I40E_PFQF_HLUT(i));
4019                 }
4020         }
4021
4022         return 0;
4023 }
4024
4025 int
4026 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4027 {
4028         struct i40e_pf *pf;
4029         struct i40e_hw *hw;
4030         int ret;
4031
4032         if (!vsi || !lut)
4033                 return -EINVAL;
4034
4035         pf = I40E_VSI_TO_PF(vsi);
4036         hw = I40E_VSI_TO_HW(vsi);
4037
4038         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4039                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
4040                                           lut, lut_size);
4041                 if (ret) {
4042                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4043                         return ret;
4044                 }
4045         } else {
4046                 uint32_t *lut_dw = (uint32_t *)lut;
4047                 uint16_t i, lut_size_dw = lut_size / 4;
4048
4049                 if (vsi->type == I40E_VSI_SRIOV) {
4050                         for (i = 0; i < lut_size_dw; i++)
4051                                 I40E_WRITE_REG(
4052                                         hw,
4053                                         I40E_VFQF_HLUT1(i, vsi->user_param),
4054                                         lut_dw[i]);
4055                 } else {
4056                         for (i = 0; i < lut_size_dw; i++)
4057                                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4058                                                lut_dw[i]);
4059                 }
4060                 I40E_WRITE_FLUSH(hw);
4061         }
4062
4063         return 0;
4064 }
4065
4066 static int
4067 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4068                          struct rte_eth_rss_reta_entry64 *reta_conf,
4069                          uint16_t reta_size)
4070 {
4071         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4072         uint16_t i, lut_size = pf->hash_lut_size;
4073         uint16_t idx, shift;
4074         uint8_t *lut;
4075         int ret;
4076
4077         if (reta_size != lut_size ||
4078                 reta_size > ETH_RSS_RETA_SIZE_512) {
4079                 PMD_DRV_LOG(ERR,
4080                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4081                         reta_size, lut_size);
4082                 return -EINVAL;
4083         }
4084
4085         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4086         if (!lut) {
4087                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4088                 return -ENOMEM;
4089         }
4090         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4091         if (ret)
4092                 goto out;
4093         for (i = 0; i < reta_size; i++) {
4094                 idx = i / RTE_RETA_GROUP_SIZE;
4095                 shift = i % RTE_RETA_GROUP_SIZE;
4096                 if (reta_conf[idx].mask & (1ULL << shift))
4097                         lut[i] = reta_conf[idx].reta[shift];
4098         }
4099         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4100
4101 out:
4102         rte_free(lut);
4103
4104         return ret;
4105 }
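
/* Illustrative sketch only (guarded out of the build): building the
 * rte_eth_rss_reta_entry64 groups consumed by the update above. Each
 * 64-entry group carries a mask selecting which of its slots to write;
 * port_id and nb_queues are assumptions for the example.
 */
#ifdef I40E_DOC_EXAMPLE
static int
example_spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
                                                  RTE_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
                uint16_t idx = i / RTE_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_RETA_GROUP_SIZE;

                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif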
4106
4107 static int
4108 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4109                         struct rte_eth_rss_reta_entry64 *reta_conf,
4110                         uint16_t reta_size)
4111 {
4112         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4113         uint16_t i, lut_size = pf->hash_lut_size;
4114         uint16_t idx, shift;
4115         uint8_t *lut;
4116         int ret;
4117
4118         if (reta_size != lut_size ||
4119                 reta_size > ETH_RSS_RETA_SIZE_512) {
4120                 PMD_DRV_LOG(ERR,
4121                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4122                         reta_size, lut_size);
4123                 return -EINVAL;
4124         }
4125
4126         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4127         if (!lut) {
4128                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4129                 return -ENOMEM;
4130         }
4131
4132         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4133         if (ret)
4134                 goto out;
4135         for (i = 0; i < reta_size; i++) {
4136                 idx = i / RTE_RETA_GROUP_SIZE;
4137                 shift = i % RTE_RETA_GROUP_SIZE;
4138                 if (reta_conf[idx].mask & (1ULL << shift))
4139                         reta_conf[idx].reta[shift] = lut[i];
4140         }
4141
4142 out:
4143         rte_free(lut);
4144
4145         return ret;
4146 }
4147
4148 /**
4149  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4150  * @hw:   pointer to the HW structure
4151  * @mem:  pointer to mem struct to fill out
4152  * @size: size of memory requested
4153  * @alignment: what to align the allocation to
4154  **/
4155 enum i40e_status_code
4156 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4157                         struct i40e_dma_mem *mem,
4158                         u64 size,
4159                         u32 alignment)
4160 {
4161         const struct rte_memzone *mz = NULL;
4162         char z_name[RTE_MEMZONE_NAMESIZE];
4163
4164         if (!mem)
4165                 return I40E_ERR_PARAM;
4166
4167         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4168         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4169                         RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4170         if (!mz)
4171                 return I40E_ERR_NO_MEMORY;
4172
4173         mem->size = size;
4174         mem->va = mz->addr;
4175         mem->pa = mz->iova;
4176         mem->zone = (const void *)mz;
4177         PMD_DRV_LOG(DEBUG,
4178                 "memzone %s allocated with physical address: %"PRIu64,
4179                 mz->name, mem->pa);
4180
4181         return I40E_SUCCESS;
4182 }
4183
4184 /**
4185  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4186  * @hw:   pointer to the HW structure
4187  * @mem:  ptr to mem struct to free
4188  **/
4189 enum i40e_status_code
4190 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4191                     struct i40e_dma_mem *mem)
4192 {
4193         if (!mem)
4194                 return I40E_ERR_PARAM;
4195
4196         PMD_DRV_LOG(DEBUG,
4197                 "memzone %s to be freed with physical address: %"PRIu64,
4198                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4199         rte_memzone_free((const struct rte_memzone *)mem->zone);
4200         mem->zone = NULL;
4201         mem->va = NULL;
4202         mem->pa = (u64)0;
4203
4204         return I40E_SUCCESS;
4205 }
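
/* Illustrative sketch only (guarded out of the build): the allocate/free
 * pair above backs the DMA buffers the shared base code uses for admin
 * queue rings and similar structures. The 4 KB size and alignment are
 * assumptions for the example.
 */
#ifdef I40E_DOC_EXAMPLE
static enum i40e_status_code
example_dma_roundtrip(struct i40e_hw *hw)
{
        struct i40e_dma_mem mem;
        enum i40e_status_code ret;

        ret = i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096);
        if (ret != I40E_SUCCESS)
                return ret;

        /* mem.va is the CPU address; mem.pa is the IOVA handed to the HW */

        return i40e_free_dma_mem_d(hw, &mem);
}
#endif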
4206
4207 /**
4208  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4209  * @hw:   pointer to the HW structure
4210  * @mem:  pointer to mem struct to fill out
4211  * @size: size of memory requested
4212  **/
4213 enum i40e_status_code
4214 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4215                          struct i40e_virt_mem *mem,
4216                          u32 size)
4217 {
4218         if (!mem)
4219                 return I40E_ERR_PARAM;
4220
4221         mem->size = size;
4222         mem->va = rte_zmalloc("i40e", size, 0);
4223
4224         if (mem->va)
4225                 return I40E_SUCCESS;
4226         else
4227                 return I40E_ERR_NO_MEMORY;
4228 }
4229
4230 /**
4231  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4232  * @hw:   pointer to the HW structure
4233  * @mem:  pointer to mem struct to free
4234  **/
4235 enum i40e_status_code
4236 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4237                      struct i40e_virt_mem *mem)
4238 {
4239         if (!mem)
4240                 return I40E_ERR_PARAM;
4241
4242         rte_free(mem->va);
4243         mem->va = NULL;
4244
4245         return I40E_SUCCESS;
4246 }
4247
4248 void
4249 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4250 {
4251         rte_spinlock_init(&sp->spinlock);
4252 }
4253
4254 void
4255 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4256 {
4257         rte_spinlock_lock(&sp->spinlock);
4258 }
4259
4260 void
4261 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4262 {
4263         rte_spinlock_unlock(&sp->spinlock);
4264 }
4265
4266 void
4267 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4268 {
4269         return;
4270 }
4271
4272 /**
4273  * Get the hardware capabilities, which will be parsed
4274  * and saved into struct i40e_hw.
4275  */
4276 static int
4277 i40e_get_cap(struct i40e_hw *hw)
4278 {
4279         struct i40e_aqc_list_capabilities_element_resp *buf;
4280         uint16_t len, size = 0;
4281         int ret;
4282
4283         /* Allocate a buffer large enough to hold the response data temporarily */
4284         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4285                                                 I40E_MAX_CAP_ELE_NUM;
4286         buf = rte_zmalloc("i40e", len, 0);
4287         if (!buf) {
4288                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4289                 return I40E_ERR_NO_MEMORY;
4290         }
4291
4292         /* Get and parse the capabilities, then save them to hw */
4293         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4294                         i40e_aqc_opc_list_func_capabilities, NULL);
4295         if (ret != I40E_SUCCESS)
4296                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4297
4298         /* Free the temporary buffer after being used */
4299         rte_free(buf);
4300
4301         return ret;
4302 }
4303
4304 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF        4
4305 #define QUEUE_NUM_PER_VF_ARG                    "queue-num-per-vf"
4306
4307 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4308                 const char *value,
4309                 void *opaque)
4310 {
4311         struct i40e_pf *pf;
4312         unsigned long num;
4313         char *end;
4314
4315         pf = (struct i40e_pf *)opaque;
4316         RTE_SET_USED(key);
4317
4318         errno = 0;
4319         num = strtoul(value, &end, 0);
4320         if (errno != 0 || end == value || *end != 0) {
4321                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4322                             "the current value = %hu", value, pf->vf_nb_qp_max);
4323                 return -(EINVAL);
4324         }
4325
4326         if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4327                 pf->vf_nb_qp_max = (uint16_t)num;
4328         else
4329                 /* Return 0 so a later valid same argument still works */
4330                 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be "
4331                             "a power of 2 and no greater than 16; keeping "
4332                             "the current value = %hu", num, pf->vf_nb_qp_max);
4333
4334         return 0;
4335 }
4336
4337 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4338 {
4339         static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
4340         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4341         struct rte_kvargs *kvlist;
4342
4343         /* set default queue number per VF as 4 */
4344         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4345
4346         if (dev->device->devargs == NULL)
4347                 return 0;
4348
4349         kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4350         if (kvlist == NULL)
4351                 return -(EINVAL);
4352
4353         if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
4354                 PMD_DRV_LOG(WARNING, "Argument \"%s\" given more than once; only "
4355                             "the first invalid or the last valid one is used",
4356                             QUEUE_NUM_PER_VF_ARG);
4357
4358         rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
4359                            i40e_pf_parse_vf_queue_number_handler, pf);
4360
4361         rte_kvargs_free(kvlist);
4362
4363         return 0;
4364 }
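
/* Illustrative sketch only (guarded out of the build): the
 * "queue-num-per-vf" device argument parsed above is normally given on the
 * EAL command line, e.g. "-w 0000:02:00.0,queue-num-per-vf=8". The snippet
 * feeds an equivalent string through rte_kvargs directly; the argument
 * string is an assumption for the example.
 */
#ifdef I40E_DOC_EXAMPLE
static int
example_parse_vf_queue_arg(struct i40e_pf *pf)
{
        static const char * const keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
        struct rte_kvargs *kvlist;

        kvlist = rte_kvargs_parse("queue-num-per-vf=8", keys);
        if (kvlist == NULL)
                return -EINVAL;

        rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
                           i40e_pf_parse_vf_queue_number_handler, pf);
        rte_kvargs_free(kvlist);

        return 0;
}
#endif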
4365
4366 static int
4367 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4368 {
4369         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4370         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4371         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4372         uint16_t qp_count = 0, vsi_count = 0;
4373
4374         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4375                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4376                 return -EINVAL;
4377         }
4378
4379         i40e_pf_config_vf_rxq_number(dev);
4380
4381         /* Add the parameter init for LFC */
4382         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4383         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4384         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4385
4386         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4387         pf->max_num_vsi = hw->func_caps.num_vsis;
4388         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4389         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4390
4391         /* FDir queue/VSI allocation */
4392         pf->fdir_qp_offset = 0;
4393         if (hw->func_caps.fd) {
4394                 pf->flags |= I40E_FLAG_FDIR;
4395                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4396         } else {
4397                 pf->fdir_nb_qps = 0;
4398         }
4399         qp_count += pf->fdir_nb_qps;
4400         vsi_count += 1;
4401
4402         /* LAN queue/VSI allocation */
4403         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4404         if (!hw->func_caps.rss) {
4405                 pf->lan_nb_qps = 1;
4406         } else {
4407                 pf->flags |= I40E_FLAG_RSS;
4408                 if (hw->mac.type == I40E_MAC_X722)
4409                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4410                 pf->lan_nb_qps = pf->lan_nb_qp_max;
4411         }
4412         qp_count += pf->lan_nb_qps;
4413         vsi_count += 1;
4414
4415         /* VF queue/VSI allocation */
4416         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4417         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4418                 pf->flags |= I40E_FLAG_SRIOV;
4419                 pf->vf_nb_qps = pf->vf_nb_qp_max;
4420                 pf->vf_num = pci_dev->max_vfs;
4421                 PMD_DRV_LOG(DEBUG,
4422                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4423                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4424         } else {
4425                 pf->vf_nb_qps = 0;
4426                 pf->vf_num = 0;
4427         }
4428         qp_count += pf->vf_nb_qps * pf->vf_num;
4429         vsi_count += pf->vf_num;
4430
4431         /* VMDq queue/VSI allocation */
4432         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4433         pf->vmdq_nb_qps = 0;
4434         pf->max_nb_vmdq_vsi = 0;
4435         if (hw->func_caps.vmdq) {
4436                 if (qp_count < hw->func_caps.num_tx_qp &&
4437                         vsi_count < hw->func_caps.num_vsis) {
4438                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4439                                 qp_count) / pf->vmdq_nb_qp_max;
4440
4441                         /* Limit the maximum number of VMDq vsi to the maximum
4442                          * ethdev can support
4443                          */
4444                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4445                                 hw->func_caps.num_vsis - vsi_count);
4446                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4447                                 ETH_64_POOLS);
4448                         if (pf->max_nb_vmdq_vsi) {
4449                                 pf->flags |= I40E_FLAG_VMDQ;
4450                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4451                                 PMD_DRV_LOG(DEBUG,
4452                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4453                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4454                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4455                         } else {
4456                                 PMD_DRV_LOG(INFO,
4457                                         "Not enough queues left for VMDq");
4458                         }
4459                 } else {
4460                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4461                 }
4462         }
4463         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4464         vsi_count += pf->max_nb_vmdq_vsi;
4465
4466         if (hw->func_caps.dcb)
4467                 pf->flags |= I40E_FLAG_DCB;
4468
4469         if (qp_count > hw->func_caps.num_tx_qp) {
4470                 PMD_DRV_LOG(ERR,
4471                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4472                         qp_count, hw->func_caps.num_tx_qp);
4473                 return -EINVAL;
4474         }
4475         if (vsi_count > hw->func_caps.num_vsis) {
4476                 PMD_DRV_LOG(ERR,
4477                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4478                         vsi_count, hw->func_caps.num_vsis);
4479                 return -EINVAL;
4480         }
4481
4482         return 0;
4483 }
4484
4485 static int
4486 i40e_pf_get_switch_config(struct i40e_pf *pf)
4487 {
4488         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4489         struct i40e_aqc_get_switch_config_resp *switch_config;
4490         struct i40e_aqc_switch_config_element_resp *element;
4491         uint16_t start_seid = 0, num_reported;
4492         int ret;
4493
4494         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4495                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4496         if (!switch_config) {
4497                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4498                 return -ENOMEM;
4499         }
4500
4501         /* Get the switch configurations */
4502         ret = i40e_aq_get_switch_config(hw, switch_config,
4503                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4504         if (ret != I40E_SUCCESS) {
4505                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4506                 goto fail;
4507         }
4508         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4509         if (num_reported != 1) { /* The number should be 1 */
4510                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4511                 goto fail;
4512         }
4513
4514         /* Parse the switch configuration elements */
4515         element = &(switch_config->element[0]);
4516         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4517                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4518                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4519         } else
4520                 PMD_DRV_LOG(INFO, "Unknown element type");
4521
4522 fail:
4523         rte_free(switch_config);
4524
4525         return ret;
4526 }
4527
4528 static int
4529 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4530                         uint32_t num)
4531 {
4532         struct pool_entry *entry;
4533
4534         if (pool == NULL || num == 0)
4535                 return -EINVAL;
4536
4537         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4538         if (entry == NULL) {
4539                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4540                 return -ENOMEM;
4541         }
4542
4543         /* queue heap initialize */
4544         pool->num_free = num;
4545         pool->num_alloc = 0;
4546         pool->base = base;
4547         LIST_INIT(&pool->alloc_list);
4548         LIST_INIT(&pool->free_list);
4549
4550         /* Initialize the element */
4551         entry->base = 0;
4552         entry->len = num;
4553
4554         LIST_INSERT_HEAD(&pool->free_list, entry, next);
4555         return 0;
4556 }
4557
4558 static void
4559 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4560 {
4561         struct pool_entry *entry, *next_entry;
4562
4563         if (pool == NULL)
4564                 return;
4565
4566         for (entry = LIST_FIRST(&pool->alloc_list);
4567                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4568                         entry = next_entry) {
4569                 LIST_REMOVE(entry, next);
4570                 rte_free(entry);
4571         }
4572
4573         for (entry = LIST_FIRST(&pool->free_list);
4574                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4575                         entry = next_entry) {
4576                 LIST_REMOVE(entry, next);
4577                 rte_free(entry);
4578         }
4579
4580         pool->num_free = 0;
4581         pool->num_alloc = 0;
4582         pool->base = 0;
4583         LIST_INIT(&pool->alloc_list);
4584         LIST_INIT(&pool->free_list);
4585 }
4586
4587 static int
4588 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4589                        uint32_t base)
4590 {
4591         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4592         uint32_t pool_offset;
4593         int insert;
4594
4595         if (pool == NULL) {
4596                 PMD_DRV_LOG(ERR, "Invalid parameter");
4597                 return -EINVAL;
4598         }
4599
4600         pool_offset = base - pool->base;
4601         /* Lookup in alloc list */
4602         LIST_FOREACH(entry, &pool->alloc_list, next) {
4603                 if (entry->base == pool_offset) {
4604                         valid_entry = entry;
4605                         LIST_REMOVE(entry, next);
4606                         break;
4607                 }
4608         }
4609
4610         /* Not found, return */
4611         if (valid_entry == NULL) {
4612                 PMD_DRV_LOG(ERR, "Failed to find entry");
4613                 return -EINVAL;
4614         }
4615
4616         /**
4617          * Found it; move it to the free list and try to merge.
4618          * To make merging easier, the free list is kept sorted by base.
4619          * Find the adjacent prev and next entries.
4620          */
4621         prev = next = NULL;
4622         LIST_FOREACH(entry, &pool->free_list, next) {
4623                 if (entry->base > valid_entry->base) {
4624                         next = entry;
4625                         break;
4626                 }
4627                 prev = entry;
4628         }
4629
4630         insert = 0;
4631         /* Try to merge with the next one */
4632         if (next != NULL) {
4633                 /* Merge with next one */
4634                 if (valid_entry->base + valid_entry->len == next->base) {
4635                         next->base = valid_entry->base;
4636                         next->len += valid_entry->len;
4637                         rte_free(valid_entry);
4638                         valid_entry = next;
4639                         insert = 1;
4640                 }
4641         }
4642
4643         if (prev != NULL) {
4644                 /* Merge with previous one */
4645                 if (prev->base + prev->len == valid_entry->base) {
4646                         prev->len += valid_entry->len;
4647                         /* If it already merged with the next one, remove that node */
4648                         if (insert == 1) {
4649                                 LIST_REMOVE(valid_entry, next);
4650                                 rte_free(valid_entry);
4651                         } else {
4652                                 rte_free(valid_entry);
4653                                 insert = 1;
4654                         }
4655                 }
4656         }
4657
4658         /* No entry was merged, insert the freed range */
4659         if (insert == 0) {
4660                 if (prev != NULL)
4661                         LIST_INSERT_AFTER(prev, valid_entry, next);
4662                 else if (next != NULL)
4663                         LIST_INSERT_BEFORE(next, valid_entry, next);
4664                 else /* It's empty list, insert to head */
4665                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4666         }
4667
4668         pool->num_free += valid_entry->len;
4669         pool->num_alloc -= valid_entry->len;
4670
4671         return 0;
4672 }
4673
4674 static int
4675 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4676                        uint16_t num)
4677 {
4678         struct pool_entry *entry, *valid_entry;
4679
4680         if (pool == NULL || num == 0) {
4681                 PMD_DRV_LOG(ERR, "Invalid parameter");
4682                 return -EINVAL;
4683         }
4684
4685         if (pool->num_free < num) {
4686                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4687                             num, pool->num_free);
4688                 return -ENOMEM;
4689         }
4690
4691         valid_entry = NULL;
4692         /* Look up the free list and find the best-fit entry */
4693         LIST_FOREACH(entry, &pool->free_list, next) {
4694                 if (entry->len >= num) {
4695                         /* Find best one */
4696                         if (entry->len == num) {
4697                                 valid_entry = entry;
4698                                 break;
4699                         }
4700                         if (valid_entry == NULL || valid_entry->len > entry->len)
4701                                 valid_entry = entry;
4702                 }
4703         }
4704
4705         /* No entry satisfies the request, return */
4706         if (valid_entry == NULL) {
4707                 PMD_DRV_LOG(ERR, "No valid entry found");
4708                 return -ENOMEM;
4709         }
4710         /**
4711          * The entry has exactly the requested number of queues;
4712          * remove it from the free list.
4713          */
4714         if (valid_entry->len == num) {
4715                 LIST_REMOVE(valid_entry, next);
4716         } else {
4717                 /**
4718                  * The entry has more queues than requested;
4719                  * create a new entry for the alloc list and adjust
4720                  * the base and length of the free list entry.
4721                  */
4722                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4723                 if (entry == NULL) {
4724                         PMD_DRV_LOG(ERR,
4725                                 "Failed to allocate memory for resource pool");
4726                         return -ENOMEM;
4727                 }
4728                 entry->base = valid_entry->base;
4729                 entry->len = num;
4730                 valid_entry->base += num;
4731                 valid_entry->len -= num;
4732                 valid_entry = entry;
4733         }
4734
4735         /* Insert it into alloc list, not sorted */
4736         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4737
4738         pool->num_free -= valid_entry->len;
4739         pool->num_alloc += valid_entry->len;
4740
4741         return valid_entry->base + pool->base;
4742 }
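
/* Illustrative sketch only (guarded out of the build): a round trip through
 * the resource pool above. The allocator is best-fit and the free path
 * merges adjacent ranges back together. A 64-queue pool starting at
 * hardware queue 32 is an assumption for the example.
 */
#ifdef I40E_DOC_EXAMPLE
static void
example_res_pool_roundtrip(void)
{
        struct i40e_res_pool_info pool;
        int base;

        i40e_res_pool_init(&pool, 32, 64);       /* queues 32..95 free */
        base = i40e_res_pool_alloc(&pool, 16);   /* best-fit, returns 32 */
        if (base >= 0)
                i40e_res_pool_free(&pool, base); /* merged back as one range */
        i40e_res_pool_destroy(&pool);
}
#endif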
4743
4744 /**
4745  * bitmap_is_subset - Check whether src2 is subset of src1
4746  **/
4747 static inline int
4748 bitmap_is_subset(uint8_t src1, uint8_t src2)
4749 {
4750         return !((src1 ^ src2) & src2);
4751 }
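
/* Worked example: with src1 = 0x07 (TC0-TC2 supported) and src2 = 0x05
 * (TC0 and TC2 requested), (0x07 ^ 0x05) & 0x05 = 0x02 & 0x05 = 0, so src2
 * is a subset. With src2 = 0x09 (TC3 requested), (0x07 ^ 0x09) & 0x09 =
 * 0x0E & 0x09 = 0x08 != 0, so it is not.
 */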
4752
4753 static enum i40e_status_code
4754 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4755 {
4756         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4757
4758         /* If DCB is not supported, only default TC is supported */
4759         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4760                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4761                 return I40E_NOT_SUPPORTED;
4762         }
4763
4764         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4765                 PMD_DRV_LOG(ERR,
4766                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4767                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4768                 return I40E_NOT_SUPPORTED;
4769         }
4770         return I40E_SUCCESS;
4771 }
4772
4773 int
4774 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4775                                 struct i40e_vsi_vlan_pvid_info *info)
4776 {
4777         struct i40e_hw *hw;
4778         struct i40e_vsi_context ctxt;
4779         uint8_t vlan_flags = 0;
4780         int ret;
4781
4782         if (vsi == NULL || info == NULL) {
4783                 PMD_DRV_LOG(ERR, "invalid parameters");
4784                 return I40E_ERR_PARAM;
4785         }
4786
4787         if (info->on) {
4788                 vsi->info.pvid = info->config.pvid;
4789                 /**
4790                  * If insert pvid is enabled, only tagged pkts are
4791                  * allowed to be sent out.
4792                  */
4793                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4794                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4795         } else {
4796                 vsi->info.pvid = 0;
4797                 if (info->config.reject.tagged == 0)
4798                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4799
4800                 if (info->config.reject.untagged == 0)
4801                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4802         }
4803         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4804                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4805         vsi->info.port_vlan_flags |= vlan_flags;
4806         vsi->info.valid_sections =
4807                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4808         memset(&ctxt, 0, sizeof(ctxt));
4809         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4810         ctxt.seid = vsi->seid;
4811
4812         hw = I40E_VSI_TO_HW(vsi);
4813         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4814         if (ret != I40E_SUCCESS)
4815                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4816
4817         return ret;
4818 }
4819
4820 static int
4821 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4822 {
4823         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4824         int i, ret;
4825         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4826
4827         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4828         if (ret != I40E_SUCCESS)
4829                 return ret;
4830
4831         if (!vsi->seid) {
4832                 PMD_DRV_LOG(ERR, "seid not valid");
4833                 return -EINVAL;
4834         }
4835
4836         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4837         tc_bw_data.tc_valid_bits = enabled_tcmap;
4838         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4839                 tc_bw_data.tc_bw_credits[i] =
4840                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4841
4842         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4843         if (ret != I40E_SUCCESS) {
4844                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4845                 return ret;
4846         }
4847
4848         rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4849                                         sizeof(vsi->info.qs_handle));
4850         return I40E_SUCCESS;
4851 }
4852
4853 static enum i40e_status_code
4854 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4855                                  struct i40e_aqc_vsi_properties_data *info,
4856                                  uint8_t enabled_tcmap)
4857 {
4858         enum i40e_status_code ret;
4859         int i, total_tc = 0;
4860         uint16_t qpnum_per_tc, bsf, qp_idx;
4861
4862         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4863         if (ret != I40E_SUCCESS)
4864                 return ret;
4865
4866         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4867                 if (enabled_tcmap & (1 << i))
4868                         total_tc++;
4869         if (total_tc == 0)
4870                 total_tc = 1;
4871         vsi->enabled_tc = enabled_tcmap;
4872
4873         /* Number of queues per enabled TC */
4874         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4875         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4876         bsf = rte_bsf32(qpnum_per_tc);
4877
4878         /* Adjust the queue number to actual queues that can be applied */
4879         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4880                 vsi->nb_qps = qpnum_per_tc * total_tc;
4881
4882         /**
4883          * Configure TC and queue mapping parameters. For each enabled TC,
4884          * allocate qpnum_per_tc queues to its traffic; for a disabled TC,
4885          * the default queue will serve it.
4886          */
4887         qp_idx = 0;
4888         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4889                 if (vsi->enabled_tc & (1 << i)) {
4890                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4891                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4892                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4893                         qp_idx += qpnum_per_tc;
4894                 } else
4895                         info->tc_mapping[i] = 0;
4896         }
4897
4898         /* Associate queue number with VSI */
4899         if (vsi->type == I40E_VSI_SRIOV) {
4900                 info->mapping_flags |=
4901                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4902                 for (i = 0; i < vsi->nb_qps; i++)
4903                         info->queue_mapping[i] =
4904                                 rte_cpu_to_le_16(vsi->base_queue + i);
4905         } else {
4906                 info->mapping_flags |=
4907                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4908                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4909         }
4910         info->valid_sections |=
4911                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4912
4913         return I40E_SUCCESS;
4914 }
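
/* Worked example of the mapping math above: with vsi->nb_qps = 16 and
 * enabled_tcmap = 0x3 (TC0 and TC1), total_tc = 2, qpnum_per_tc = 8 and
 * bsf = rte_bsf32(8) = 3. TC0's tc_mapping is then
 * (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) and TC1's is
 * (8 << OFFSET_SHIFT) | (3 << NUMBER_SHIFT), i.e. queues 0-7 serve TC0
 * and queues 8-15 serve TC1.
 */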
4915
4916 static int
4917 i40e_veb_release(struct i40e_veb *veb)
4918 {
4919         struct i40e_vsi *vsi;
4920         struct i40e_hw *hw;
4921
4922         if (veb == NULL)
4923                 return -EINVAL;
4924
4925         if (!TAILQ_EMPTY(&veb->head)) {
4926                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4927                 return -EACCES;
4928         }
4929         /* associate_vsi field is NULL for floating VEB */
4930         if (veb->associate_vsi != NULL) {
4931                 vsi = veb->associate_vsi;
4932                 hw = I40E_VSI_TO_HW(vsi);
4933
4934                 vsi->uplink_seid = veb->uplink_seid;
4935                 vsi->veb = NULL;
4936         } else {
4937                 veb->associate_pf->main_vsi->floating_veb = NULL;
4938                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4939         }
4940
4941         i40e_aq_delete_element(hw, veb->seid, NULL);
4942         rte_free(veb);
4943         return I40E_SUCCESS;
4944 }
4945
4946 /* Setup a veb */
4947 static struct i40e_veb *
4948 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4949 {
4950         struct i40e_veb *veb;
4951         int ret;
4952         struct i40e_hw *hw;
4953
4954         if (pf == NULL) {
4955                 PMD_DRV_LOG(ERR,
4956                             "veb setup failed, associated PF shouldn't be NULL");
4957                 return NULL;
4958         }
4959         hw = I40E_PF_TO_HW(pf);
4960
4961         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4962         if (!veb) {
4963                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4964                 goto fail;
4965         }
4966
4967         veb->associate_vsi = vsi;
4968         veb->associate_pf = pf;
4969         TAILQ_INIT(&veb->head);
4970         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4971
4972         /* create floating veb if vsi is NULL */
4973         if (vsi != NULL) {
4974                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4975                                       I40E_DEFAULT_TCMAP, false,
4976                                       &veb->seid, false, NULL);
4977         } else {
4978                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4979                                       true, &veb->seid, false, NULL);
4980         }
4981
4982         if (ret != I40E_SUCCESS) {
4983                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4984                             hw->aq.asq_last_status);
4985                 goto fail;
4986         }
4987         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4988
4989         /* get statistics index */
4990         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4991                                 &veb->stats_idx, NULL, NULL, NULL);
4992         if (ret != I40E_SUCCESS) {
4993                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4994                             hw->aq.asq_last_status);
4995                 goto fail;
4996         }
4997         /* Get VEB bandwidth, to be implemented */
4998         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4999         if (vsi)
5000                 vsi->uplink_seid = veb->seid;
5001
5002         return veb;
5003 fail:
5004         rte_free(veb);
5005         return NULL;
5006 }
5007
5008 int
5009 i40e_vsi_release(struct i40e_vsi *vsi)
5010 {
5011         struct i40e_pf *pf;
5012         struct i40e_hw *hw;
5013         struct i40e_vsi_list *vsi_list;
5014         void *temp;
5015         int ret;
5016         struct i40e_mac_filter *f;
5017         uint16_t user_param;
5018
5019         if (!vsi)
5020                 return I40E_SUCCESS;
5021
5022         if (!vsi->adapter)
5023                 return -EFAULT;
5024
5025         user_param = vsi->user_param;
5026
5027         pf = I40E_VSI_TO_PF(vsi);
5028         hw = I40E_VSI_TO_HW(vsi);
5029
5030         /* VSI has child to attach, release child first */
5031         if (vsi->veb) {
5032                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5033                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5034                                 return -1;
5035                 }
5036                 i40e_veb_release(vsi->veb);
5037         }
5038
5039         if (vsi->floating_veb) {
5040                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5041                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5042                                 return -1;
5043                 }
5044         }
5045
5046         /* Remove all macvlan filters of the VSI */
5047         i40e_vsi_remove_all_macvlan_filter(vsi);
5048         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5049                 rte_free(f);
5050
5051         if (vsi->type != I40E_VSI_MAIN &&
5052             ((vsi->type != I40E_VSI_SRIOV) ||
5053             !pf->floating_veb_list[user_param])) {
5054                 /* Remove vsi from parent's sibling list */
5055                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5056                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
5057                         return I40E_ERR_PARAM;
5058                 }
5059                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5060                                 &vsi->sib_vsi_list, list);
5061
5062                 /* Remove all switch element of the VSI */
5063                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5064                 if (ret != I40E_SUCCESS)
5065                         PMD_DRV_LOG(ERR, "Failed to delete element");
5066         }
5067
5068         if ((vsi->type == I40E_VSI_SRIOV) &&
5069             pf->floating_veb_list[user_param]) {
5070                 /* Remove vsi from parent's sibling list */
5071                 if (vsi->parent_vsi == NULL ||
5072                     vsi->parent_vsi->floating_veb == NULL) {
5073                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
5074                         return I40E_ERR_PARAM;
5075                 }
5076                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5077                              &vsi->sib_vsi_list, list);
5078
5079                 /* Remove all switch element of the VSI */
5080                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5081                 if (ret != I40E_SUCCESS)
5082                         PMD_DRV_LOG(ERR, "Failed to delete element");
5083         }
5084
5085         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5086
5087         if (vsi->type != I40E_VSI_SRIOV)
5088                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5089         rte_free(vsi);
5090
5091         return I40E_SUCCESS;
5092 }
5093
5094 static int
5095 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5096 {
5097         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5098         struct i40e_aqc_remove_macvlan_element_data def_filter;
5099         struct i40e_mac_filter_info filter;
5100         int ret;
5101
5102         if (vsi->type != I40E_VSI_MAIN)
5103                 return I40E_ERR_CONFIG;
5104         memset(&def_filter, 0, sizeof(def_filter));
5105         rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5106                                         ETH_ADDR_LEN);
5107         def_filter.vlan_tag = 0;
5108         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5109                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5110         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5111         if (ret != I40E_SUCCESS) {
5112                 struct i40e_mac_filter *f;
5113                 struct ether_addr *mac;
5114
5115                 PMD_DRV_LOG(DEBUG,
5116                             "Cannot remove the default macvlan filter");
5117                 /* Add the permanent MAC into the MAC list instead */
5118                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5119                 if (f == NULL) {
5120                         PMD_DRV_LOG(ERR, "failed to allocate memory");
5121                         return I40E_ERR_NO_MEMORY;
5122                 }
5123                 mac = &f->mac_info.mac_addr;
5124                 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5125                                 ETH_ADDR_LEN);
5126                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5127                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5128                 vsi->mac_num++;
5129
5130                 return ret;
5131         }
5132         rte_memcpy(&filter.mac_addr,
5133                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5134         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5135         return i40e_vsi_add_mac(vsi, &filter);
5136 }
5137
5138 /*
5139  * i40e_vsi_get_bw_config - Query VSI BW Information
5140  * @vsi: the VSI to be queried
5141  *
5142  * Returns 0 on success, negative value on failure
5143  */
5144 static enum i40e_status_code
5145 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5146 {
5147         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5148         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5149         struct i40e_hw *hw = &vsi->adapter->hw;
5150         i40e_status ret;
5151         int i;
5152         uint32_t bw_max;
5153
5154         memset(&bw_config, 0, sizeof(bw_config));
5155         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5156         if (ret != I40E_SUCCESS) {
5157                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5158                             hw->aq.asq_last_status);
5159                 return ret;
5160         }
5161
5162         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5163         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5164                                         &ets_sla_config, NULL);
5165         if (ret != I40E_SUCCESS) {
5166                 PMD_DRV_LOG(ERR,
5167                         "VSI failed to get TC bandwidth configuration %u",
5168                         hw->aq.asq_last_status);
5169                 return ret;
5170         }
5171
5172         /* store and print out BW info */
5173         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5174         vsi->bw_info.bw_max = bw_config.max_bw;
5175         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5176         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5177         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5178                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5179                      I40E_16_BIT_WIDTH);
5180         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5181                 vsi->bw_info.bw_ets_share_credits[i] =
5182                                 ets_sla_config.share_credits[i];
5183                 vsi->bw_info.bw_ets_credits[i] =
5184                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5185                 /* 4 bits per TC; the 4th (MSB) bit is reserved, keep the low 3 */
5186                 vsi->bw_info.bw_ets_max[i] =
5187                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5188                                   RTE_LEN2MASK(3, uint8_t));
5189                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5190                             vsi->bw_info.bw_ets_share_credits[i]);
5191                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5192                             vsi->bw_info.bw_ets_credits[i]);
5193                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5194                             vsi->bw_info.bw_ets_max[i]);
5195         }
5196
5197         return I40E_SUCCESS;
5198 }
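/*
 * Illustrative note (not part of the driver): how the bw_ets_max
 * unpacking above works. tc_bw_max is two little-endian 16-bit words
 * combined into one 32-bit value; each TC owns 4 bits, of which only
 * the low 3 are valid. For example, with tc_bw_max[0] = 0x4321 and
 * tc_bw_max[1] = 0x0765 (assumed values):
 *
 *     bw_max = 0x07654321;
 *     TC0 = (bw_max >> 0)  & 0x7;    -> 1
 *     TC1 = (bw_max >> 4)  & 0x7;    -> 2
 *     TC6 = (bw_max >> 24) & 0x7;    -> 7
 *     TC7 = (bw_max >> 28) & 0x7;    -> 0 (high bit reserved)
 */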
5199
5200 /* i40e_enable_pf_lb
5201  * @pf: pointer to the pf structure
5202  *
5203  * Allow loopback on the PF.
5204  */
5205 static inline void
5206 i40e_enable_pf_lb(struct i40e_pf *pf)
5207 {
5208         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5209         struct i40e_vsi_context ctxt;
5210         int ret;
5211
5212         /* Loopback configuration requires the FW API of FW >= v5.0 */
5213         if (hw->aq.fw_maj_ver < 5) {
5214                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5215                 return;
5216         }
5217
5218         memset(&ctxt, 0, sizeof(ctxt));
5219         ctxt.seid = pf->main_vsi_seid;
5220         ctxt.pf_num = hw->pf_id;
5221         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5222         if (ret) {
5223                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5224                             ret, hw->aq.asq_last_status);
5225                 return;
5226         }
5227         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5228         ctxt.info.valid_sections =
5229                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5230         ctxt.info.switch_id |=
5231                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5232
5233         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5234         if (ret)
5235                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5236                             hw->aq.asq_last_status);
5237 }
5238
5239 /* Setup a VSI */
5240 struct i40e_vsi *
5241 i40e_vsi_setup(struct i40e_pf *pf,
5242                enum i40e_vsi_type type,
5243                struct i40e_vsi *uplink_vsi,
5244                uint16_t user_param)
5245 {
5246         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5247         struct i40e_vsi *vsi;
5248         struct i40e_mac_filter_info filter;
5249         int ret;
5250         struct i40e_vsi_context ctxt;
5251         struct ether_addr broadcast =
5252                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5253
5254         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5255             uplink_vsi == NULL) {
5256                 PMD_DRV_LOG(ERR,
5257                         "VSI setup failed, VSI link shouldn't be NULL");
5258                 return NULL;
5259         }
5260
5261         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5262                 PMD_DRV_LOG(ERR,
5263                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5264                 return NULL;
5265         }
5266
5267         /* Two situations require creating a VEB first:
5268          * 1. type is not MAIN and the uplink VSI is not NULL:
5269          *    if the uplink VSI has no VEB yet, create one in its veb field.
5270          * 2. type is SRIOV and the uplink is NULL:
5271          *    if no floating VEB exists, create one in the floating_veb field.
5272          */
5273
5274         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5275             uplink_vsi->veb == NULL) {
5276                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5277
5278                 if (uplink_vsi->veb == NULL) {
5279                         PMD_DRV_LOG(ERR, "VEB setup failed");
5280                         return NULL;
5281                 }
5282                 /* Set ALLOWLOOPBACK on the PF when a VEB is created */
5283                 i40e_enable_pf_lb(pf);
5284         }
5285
5286         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5287             pf->main_vsi->floating_veb == NULL) {
5288                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5289
5290                 if (pf->main_vsi->floating_veb == NULL) {
5291                         PMD_DRV_LOG(ERR, "VEB setup failed");
5292                         return NULL;
5293                 }
5294         }
5295
5296         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5297         if (!vsi) {
5298                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5299                 return NULL;
5300         }
5301         TAILQ_INIT(&vsi->mac_list);
5302         vsi->type = type;
5303         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5304         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5305         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5306         vsi->user_param = user_param;
5307         vsi->vlan_anti_spoof_on = 0;
5308         vsi->vlan_filter_on = 0;
5309         /* Allocate queues */
5310         switch (vsi->type) {
5311         case I40E_VSI_MAIN:
5312                 vsi->nb_qps = pf->lan_nb_qps;
5313                 break;
5314         case I40E_VSI_SRIOV:
5315                 vsi->nb_qps = pf->vf_nb_qps;
5316                 break;
5317         case I40E_VSI_VMDQ2:
5318                 vsi->nb_qps = pf->vmdq_nb_qps;
5319                 break;
5320         case I40E_VSI_FDIR:
5321                 vsi->nb_qps = pf->fdir_nb_qps;
5322                 break;
5323         default:
5324                 goto fail_mem;
5325         }
5326         /*
5327          * The filter status descriptor is reported on RX queue 0,
5328          * while the TX queue for FDIR filter programming has no such
5329          * constraint and could be any non-zero queue.
5330          * To keep it simple, the FDIR VSI uses queue pair 0.
5331          * To make sure queue pair 0 is used, the queue allocation
5332          * must be done before this function is called.
5333          */
5334         if (type != I40E_VSI_FDIR) {
5335                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5336                 if (ret < 0) {
5337                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5338                                         vsi->seid, ret);
5339                         goto fail_mem;
5340                 }
5341                 vsi->base_queue = ret;
5342         } else
5343                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5344
5345         /* A VF has its MSI-X interrupts in the VF range; don't allocate here */
5346         if (type == I40E_VSI_MAIN) {
5347                 if (pf->support_multi_driver) {
5348                         /* With multi-driver support, use INT0 instead of
5349                          * allocating from the MSI-X pool. The pool starts
5350                          * at INT1, so it is safe to set msix_intr to 0 and
5351                          * nb_msix to 1 without calling i40e_res_pool_alloc().
5352                          */
5353                         vsi->msix_intr = 0;
5354                         vsi->nb_msix = 1;
5355                 } else {
5356                         ret = i40e_res_pool_alloc(&pf->msix_pool,
5357                                                   RTE_MIN(vsi->nb_qps,
5358                                                      RTE_MAX_RXTX_INTR_VEC_ID));
5359                         if (ret < 0) {
5360                                 PMD_DRV_LOG(ERR,
5361                                             "VSI MAIN %d get heap failed %d",
5362                                             vsi->seid, ret);
5363                                 goto fail_queue_alloc;
5364                         }
5365                         vsi->msix_intr = ret;
5366                         vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5367                                                RTE_MAX_RXTX_INTR_VEC_ID);
5368                 }
5369         } else if (type != I40E_VSI_SRIOV) {
5370                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5371                 if (ret < 0) {
5372                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5373                         goto fail_queue_alloc;
5374                 }
5375                 vsi->msix_intr = ret;
5376                 vsi->nb_msix = 1;
5377         } else {
5378                 vsi->msix_intr = 0;
5379                 vsi->nb_msix = 0;
5380         }
5381
5382         /* Add VSI */
5383         if (type == I40E_VSI_MAIN) {
5384                 /* For the main VSI, no need to add it since it is the default one */
5385                 vsi->uplink_seid = pf->mac_seid;
5386                 vsi->seid = pf->main_vsi_seid;
5387                 /* Bind queues to specific MSI-X interrupts. */
5388                 /**
5389                  * At least two interrupts are needed: one for the misc
5390                  * cause, which is enabled from the OS side, and another
5391                  * for the queues, bound from the device side only.
5392                  */
5393
5394                 /* Get default VSI parameters from hardware */
5395                 memset(&ctxt, 0, sizeof(ctxt));
5396                 ctxt.seid = vsi->seid;
5397                 ctxt.pf_num = hw->pf_id;
5398                 ctxt.uplink_seid = vsi->uplink_seid;
5399                 ctxt.vf_num = 0;
5400                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5401                 if (ret != I40E_SUCCESS) {
5402                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
5403                         goto fail_msix_alloc;
5404                 }
5405                 rte_memcpy(&vsi->info, &ctxt.info,
5406                         sizeof(struct i40e_aqc_vsi_properties_data));
5407                 vsi->vsi_id = ctxt.vsi_number;
5408                 vsi->info.valid_sections = 0;
5409
5410                 /* Configure TCs; enable TC0 only */
5411                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5412                         I40E_SUCCESS) {
5413                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5414                         goto fail_msix_alloc;
5415                 }
5416
5417                 /* TC, queue mapping */
5418                 memset(&ctxt, 0, sizeof(ctxt));
5419                 vsi->info.valid_sections |=
5420                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5421                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5422                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5423                 rte_memcpy(&ctxt.info, &vsi->info,
5424                         sizeof(struct i40e_aqc_vsi_properties_data));
5425                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5426                                                 I40E_DEFAULT_TCMAP);
5427                 if (ret != I40E_SUCCESS) {
5428                         PMD_DRV_LOG(ERR,
5429                                 "Failed to configure TC queue mapping");
5430                         goto fail_msix_alloc;
5431                 }
5432                 ctxt.seid = vsi->seid;
5433                 ctxt.pf_num = hw->pf_id;
5434                 ctxt.uplink_seid = vsi->uplink_seid;
5435                 ctxt.vf_num = 0;
5436
5437                 /* Update VSI parameters */
5438                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5439                 if (ret != I40E_SUCCESS) {
5440                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
5441                         goto fail_msix_alloc;
5442                 }
5443
5444                 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5445                                                 sizeof(vsi->info.tc_mapping));
5446                 rte_memcpy(&vsi->info.queue_mapping,
5447                                 &ctxt.info.queue_mapping,
5448                         sizeof(vsi->info.queue_mapping));
5449                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5450                 vsi->info.valid_sections = 0;
5451
5452                 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5453                                 ETH_ADDR_LEN);
5454
5455                 /**
5456                  * Updating the default filter settings is necessary to
5457                  * prevent reception of tagged packets.
5458                  * Some old firmware configurations load a default macvlan
5459                  * filter which accepts both tagged and untagged packets;
5460                  * the update replaces it with a normal filter if needed.
5461                  * For NVM 4.2.2 or later, the update is not needed anymore:
5462                  * firmware with a correct configuration loads the expected
5463                  * default macvlan filter, which cannot be removed.
5464                  */
5465                 i40e_update_default_filter_setting(vsi);
5466                 i40e_config_qinq(hw, vsi);
5467         } else if (type == I40E_VSI_SRIOV) {
5468                 memset(&ctxt, 0, sizeof(ctxt));
5469                 /**
5470                  * For other VSIs, the uplink_seid equals the uplink VSI's
5471                  * uplink_seid since they share the same VEB.
5472                  */
5473                 if (uplink_vsi == NULL)
5474                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5475                 else
5476                         vsi->uplink_seid = uplink_vsi->uplink_seid;
5477                 ctxt.pf_num = hw->pf_id;
5478                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5479                 ctxt.uplink_seid = vsi->uplink_seid;
5480                 ctxt.connection_type = 0x1;
5481                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5482
5483                 /* Use the VEB configuration if FW >= v5.0 */
5484                 if (hw->aq.fw_maj_ver >= 5) {
5485                         /* Configure switch ID */
5486                         ctxt.info.valid_sections |=
5487                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5488                         ctxt.info.switch_id =
5489                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5490                 }
5491
5492                 /* Configure port/vlan */
5493                 ctxt.info.valid_sections |=
5494                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5495                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5496                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5497                                                 hw->func_caps.enabled_tcmap);
5498                 if (ret != I40E_SUCCESS) {
5499                         PMD_DRV_LOG(ERR,
5500                                 "Failed to configure TC queue mapping");
5501                         goto fail_msix_alloc;
5502                 }
5503
5504                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5505                 ctxt.info.valid_sections |=
5506                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5507                 /**
5508                  * The VSI is not created yet, so only configure its
5509                  * parameters here; the VSI is added below.
5510                  */
5511
5512                 i40e_config_qinq(hw, vsi);
5513         } else if (type == I40E_VSI_VMDQ2) {
5514                 memset(&ctxt, 0, sizeof(ctxt));
5515                 /*
5516                  * For other VSIs, the uplink_seid equals the uplink VSI's
5517                  * uplink_seid since they share the same VEB.
5518                  */
5519                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5520                 ctxt.pf_num = hw->pf_id;
5521                 ctxt.vf_num = 0;
5522                 ctxt.uplink_seid = vsi->uplink_seid;
5523                 ctxt.connection_type = 0x1;
5524                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5525
5526                 ctxt.info.valid_sections |=
5527                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5528                 /* user_param carries the flag to enable loopback */
5529                 if (user_param) {
5530                         ctxt.info.switch_id =
5531                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5532                         ctxt.info.switch_id |=
5533                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5534                 }
5535
5536                 /* Configure port/vlan */
5537                 ctxt.info.valid_sections |=
5538                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5539                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5540                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5541                                                 I40E_DEFAULT_TCMAP);
5542                 if (ret != I40E_SUCCESS) {
5543                         PMD_DRV_LOG(ERR,
5544                                 "Failed to configure TC queue mapping");
5545                         goto fail_msix_alloc;
5546                 }
5547                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5548                 ctxt.info.valid_sections |=
5549                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5550         } else if (type == I40E_VSI_FDIR) {
5551                 memset(&ctxt, 0, sizeof(ctxt));
5552                 vsi->uplink_seid = uplink_vsi->uplink_seid;
5553                 ctxt.pf_num = hw->pf_id;
5554                 ctxt.vf_num = 0;
5555                 ctxt.uplink_seid = vsi->uplink_seid;
5556                 ctxt.connection_type = 0x1;     /* regular data port */
5557                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5558                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5559                                                 I40E_DEFAULT_TCMAP);
5560                 if (ret != I40E_SUCCESS) {
5561                         PMD_DRV_LOG(ERR,
5562                                 "Failed to configure TC queue mapping.");
5563                         goto fail_msix_alloc;
5564                 }
5565                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5566                 ctxt.info.valid_sections |=
5567                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5568         } else {
5569                 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5570                 goto fail_msix_alloc;
5571         }
5572
5573         if (vsi->type != I40E_VSI_MAIN) {
5574                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5575                 if (ret != I40E_SUCCESS) {
5576                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5577                                     hw->aq.asq_last_status);
5578                         goto fail_msix_alloc;
5579                 }
5580                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5581                 vsi->info.valid_sections = 0;
5582                 vsi->seid = ctxt.seid;
5583                 vsi->vsi_id = ctxt.vsi_number;
5584                 vsi->sib_vsi_list.vsi = vsi;
5585                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5586                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5587                                           &vsi->sib_vsi_list, list);
5588                 } else {
5589                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5590                                           &vsi->sib_vsi_list, list);
5591                 }
5592         }
5593
5594         /* MAC/VLAN configuration */
5595         rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5596         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5597
5598         ret = i40e_vsi_add_mac(vsi, &filter);
5599         if (ret != I40E_SUCCESS) {
5600                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5601                 goto fail_msix_alloc;
5602         }
5603
5604         /* Get VSI BW information */
5605         i40e_vsi_get_bw_config(vsi);
5606         return vsi;
5607 fail_msix_alloc:
5608         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5609 fail_queue_alloc:
5610         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5611 fail_mem:
5612         rte_free(vsi);
5613         return NULL;
5614 }
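/*
 * Illustrative usage of i40e_vsi_setup() (a sketch; the values are
 * assumptions mirroring the VMDQ path in i40e_vmdq_setup() below):
 *
 *     struct i40e_vsi *vsi =
 *             i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 1);
 *     if (vsi == NULL)
 *             PMD_DRV_LOG(ERR, "Failed to create VMDQ VSI");
 */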
5615
5616 /* Configure vlan filter on or off */
5617 int
5618 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5619 {
5620         int i, num;
5621         struct i40e_mac_filter *f;
5622         void *temp;
5623         struct i40e_mac_filter_info *mac_filter;
5624         enum rte_mac_filter_type desired_filter;
5625         int ret = I40E_SUCCESS;
5626
5627         if (on) {
5628                 /* Filter to match MAC and VLAN */
5629                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5630         } else {
5631                 /* Filter to match only MAC */
5632                 desired_filter = RTE_MAC_PERFECT_MATCH;
5633         }
5634
5635         num = vsi->mac_num;
5636
5637         mac_filter = rte_zmalloc("mac_filter_info_data",
5638                                  num * sizeof(*mac_filter), 0);
5639         if (mac_filter == NULL) {
5640                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5641                 return I40E_ERR_NO_MEMORY;
5642         }
5643
5644         i = 0;
5645
5646         /* Remove all existing MAC filters */
5647         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5648                 mac_filter[i] = f->mac_info;
5649                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5650                 if (ret) {
5651                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5652                                     on ? "enable" : "disable");
5653                         goto DONE;
5654                 }
5655                 i++;
5656         }
5657
5658         /* Re-add the filters with the new filter type */
5659         for (i = 0; i < num; i++) {
5660                 mac_filter[i].filter_type = desired_filter;
5661                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5662                 if (ret) {
5663                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5664                                     on ? "enable" : "disable");
5665                         goto DONE;
5666                 }
5667         }
5668
5669 DONE:
5670         rte_free(mac_filter);
5671         return ret;
5672 }
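/*
 * Illustrative usage (a sketch, not the driver flow): switch a VSI's
 * MAC filters to MAC+VLAN matching before installing per-VLAN
 * entries; vlan_id is an assumed local variable here:
 *
 *     if (i40e_vsi_config_vlan_filter(vsi, TRUE) == I40E_SUCCESS)
 *             ret = i40e_vsi_add_vlan(vsi, vlan_id);
 */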
5673
5674 /* Configure vlan stripping on or off */
5675 int
5676 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5677 {
5678         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5679         struct i40e_vsi_context ctxt;
5680         uint8_t vlan_flags;
5681         int ret = I40E_SUCCESS;
5682
5683         /* Check if stripping is already in the requested state */
5684         if (vsi->info.valid_sections &
5685                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5686                 if (on) {
5687                         if ((vsi->info.port_vlan_flags &
5688                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5689                                 return 0; /* already on */
5690                 } else {
5691                         if ((vsi->info.port_vlan_flags &
5692                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5693                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5694                                 return 0; /* already off */
5695                 }
5696         }
5697
5698         if (on)
5699                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5700         else
5701                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
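        /*
         * Note (derived from the checks above): EMOD_STR_BOTH is the
         * all-zero encoding of the EMOD field (stripping on), while
         * EMOD_NOTHING sets every bit under EMOD_MASK (stripping off),
         * so comparing against 0 or the full mask is enough to detect
         * the current state.
         */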
5702         vsi->info.valid_sections =
5703                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5704         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5705         vsi->info.port_vlan_flags |= vlan_flags;
5706         ctxt.seid = vsi->seid;
5707         rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5708         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5709         if (ret)
5710                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5711                             on ? "enable" : "disable");
5712
5713         return ret;
5714 }
5715
5716 static int
5717 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5718 {
5719         struct rte_eth_dev_data *data = dev->data;
5720         int ret;
5721         int mask = 0;
5722
5723         /* Apply vlan offload setting */
5724         mask = ETH_VLAN_STRIP_MASK |
5725                ETH_VLAN_FILTER_MASK |
5726                ETH_VLAN_EXTEND_MASK;
5727         ret = i40e_vlan_offload_set(dev, mask);
5728         if (ret) {
5729                 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5730                 return ret;
5731         }
5732
5733         /* Apply pvid setting */
5734         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5735                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5736         if (ret)
5737                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5738
5739         return ret;
5740 }
5741
5742 static int
5743 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5744 {
5745         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5746
5747         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5748 }
5749
5750 static int
5751 i40e_update_flow_control(struct i40e_hw *hw)
5752 {
5753 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5754         struct i40e_link_status link_status;
5755         uint32_t rxfc = 0, txfc = 0, reg;
5756         uint8_t an_info;
5757         int ret;
5758
5759         memset(&link_status, 0, sizeof(link_status));
5760         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5761         if (ret != I40E_SUCCESS) {
5762                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5763                 goto write_reg; /* Disable flow control */
5764         }
5765
5766         an_info = hw->phy.link_info.an_info;
5767         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5768                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5769                 ret = I40E_ERR_NOT_READY;
5770                 goto write_reg; /* Disable flow control */
5771         }
5772         /**
5773          * If link auto-negotiation is enabled, flow control must be
5774          * configured according to the negotiated result.
5775          */
5776         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5777         case I40E_LINK_PAUSE_RXTX:
5778                 rxfc = 1;
5779                 txfc = 1;
5780                 hw->fc.current_mode = I40E_FC_FULL;
5781                 break;
5782         case I40E_AQ_LINK_PAUSE_RX:
5783                 rxfc = 1;
5784                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5785                 break;
5786         case I40E_AQ_LINK_PAUSE_TX:
5787                 txfc = 1;
5788                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5789                 break;
5790         default:
5791                 hw->fc.current_mode = I40E_FC_NONE;
5792                 break;
5793         }
5794
5795 write_reg:
5796         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5797                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5798         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5799         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5800         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5801         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5802
5803         return ret;
5804 }
5805
5806 /* PF setup */
5807 static int
5808 i40e_pf_setup(struct i40e_pf *pf)
5809 {
5810         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5811         struct i40e_filter_control_settings settings;
5812         struct i40e_vsi *vsi;
5813         int ret;
5814
5815         /* Clear all stats counters */
5816         pf->offset_loaded = FALSE;
5817         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5818         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5819         memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5820         memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5821
5822         ret = i40e_pf_get_switch_config(pf);
5823         if (ret != I40E_SUCCESS) {
5824                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5825                 return ret;
5826         }
5827
5828         ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
5829         if (ret)
5830                 PMD_INIT_LOG(WARNING,
5831                         "failed to allocate switch domain for device %d", ret);
5832
5833         if (pf->flags & I40E_FLAG_FDIR) {
5834                 /* Allocate the queue first so that FDIR can use queue pair 0 */
5835                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5836                 if (ret != I40E_FDIR_QUEUE_ID) {
5837                         PMD_DRV_LOG(ERR,
5838                                 "queue allocation fails for FDIR: ret =%d",
5839                                 ret);
5840                         pf->flags &= ~I40E_FLAG_FDIR;
5841                 }
5842         }
5843         /* Main VSI setup */
5844         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5845         if (!vsi) {
5846                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5847                 return I40E_ERR_NOT_READY;
5848         }
5849         pf->main_vsi = vsi;
5850
5851         /* Configure filter control */
5852         memset(&settings, 0, sizeof(settings));
5853         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5854                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5855         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5856                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5857         else {
5858                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5859                         hw->func_caps.rss_table_size);
5860                 return I40E_ERR_PARAM;
5861         }
5862         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5863                 hw->func_caps.rss_table_size);
5864         pf->hash_lut_size = hw->func_caps.rss_table_size;
5865
5866         /* Enable ethtype and macvlan filters */
5867         settings.enable_ethtype = TRUE;
5868         settings.enable_macvlan = TRUE;
5869         ret = i40e_set_filter_control(hw, &settings);
5870         if (ret)
5871                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5872                                                                 ret);
5873
5874         /* Update flow control according to the auto negotiation */
5875         i40e_update_flow_control(hw);
5876
5877         return I40E_SUCCESS;
5878 }
5879
5880 int
5881 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5882 {
5883         uint32_t reg;
5884         uint16_t j;
5885
5886         /**
5887          * Set or clear the TX Queue Disable flags,
5888          * as required by the hardware.
5889          */
5890         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5891         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5892
5893         /* Wait until the request is finished */
5894         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5895                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5896                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5897                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5898                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5899                                                         & 0x1))) {
5900                         break;
5901                 }
5902         }
5903         if (on) {
5904                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5905                         return I40E_SUCCESS; /* already on, skip next steps */
5906
5907                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5908                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5909         } else {
5910                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5911                         return I40E_SUCCESS; /* already off, skip next steps */
5912                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5913         }
5914         /* Write the register */
5915         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5916         /* Check the result */
5917         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5918                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5919                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5920                 if (on) {
5921                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5922                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5923                                 break;
5924                 } else {
5925                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5926                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5927                                 break;
5928                 }
5929         }
5930         /* Check if it is timeout */
5931         if (j >= I40E_CHK_Q_ENA_COUNT) {
5932                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5933                             (on ? "enable" : "disable"), q_idx);
5934                 return I40E_ERR_TIMEOUT;
5935         }
5936
5937         return I40E_SUCCESS;
5938 }
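/*
 * Note on the QENA handshake above (explanatory, not normative):
 * QENA_REQ is what software requests and QENA_STAT is what hardware
 * reports. The first poll loop waits until REQ == STAT (no transition
 * in flight), the register is then written with the new REQ value,
 * and the second loop waits until STAT catches up with REQ or the
 * poll count runs out.
 */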
5939
5940 /* Switch on or off the TX queues */
5941 static int
5942 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5943 {
5944         struct rte_eth_dev_data *dev_data = pf->dev_data;
5945         struct i40e_tx_queue *txq;
5946         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5947         uint16_t i;
5948         int ret;
5949
5950         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5951                 txq = dev_data->tx_queues[i];
5952                 /* Don't operate the queue if it is not configured, or if
5953                  * it is marked deferred-start when starting all queues */
5954                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5955                         continue;
5956                 if (on)
5957                         ret = i40e_dev_tx_queue_start(dev, i);
5958                 else
5959                         ret = i40e_dev_tx_queue_stop(dev, i);
5960                 if (ret != I40E_SUCCESS)
5961                         return ret;
5962         }
5963
5964         return I40E_SUCCESS;
5965 }
5966
5967 int
5968 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5969 {
5970         uint32_t reg;
5971         uint16_t j;
5972
5973         /* Wait until the request is finished */
5974         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5975                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5976                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5977                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5978                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5979                         break;
5980         }
5981
5982         if (on) {
5983                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5984                         return I40E_SUCCESS; /* Already on, skip next steps */
5985                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5986         } else {
5987                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5988                         return I40E_SUCCESS; /* Already off, skip next steps */
5989                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5990         }
5991
5992         /* Write the register */
5993         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5994         /* Check the result */
5995         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5996                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5997                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5998                 if (on) {
5999                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6000                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6001                                 break;
6002                 } else {
6003                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6004                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6005                                 break;
6006                 }
6007         }
6008
6009         /* Check if it is timeout */
6010         if (j >= I40E_CHK_Q_ENA_COUNT) {
6011                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6012                             (on ? "enable" : "disable"), q_idx);
6013                 return I40E_ERR_TIMEOUT;
6014         }
6015
6016         return I40E_SUCCESS;
6017 }
6018 /* Switch on or off the rx queues */
6019 static int
6020 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
6021 {
6022         struct rte_eth_dev_data *dev_data = pf->dev_data;
6023         struct i40e_rx_queue *rxq;
6024         struct rte_eth_dev *dev = pf->adapter->eth_dev;
6025         uint16_t i;
6026         int ret;
6027
6028         for (i = 0; i < dev_data->nb_rx_queues; i++) {
6029                 rxq = dev_data->rx_queues[i];
6030                 /* Don't operate the queue if it is not configured, or if
6031                  * it is marked deferred-start when starting all queues */
6032                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
6033                         continue;
6034                 if (on)
6035                         ret = i40e_dev_rx_queue_start(dev, i);
6036                 else
6037                         ret = i40e_dev_rx_queue_stop(dev, i);
6038                 if (ret != I40E_SUCCESS)
6039                         return ret;
6040         }
6041
6042         return I40E_SUCCESS;
6043 }
6044
6045 /* Switch on or off all the rx/tx queues */
6046 int
6047 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
6048 {
6049         int ret;
6050
6051         if (on) {
6052                 /* enable rx queues before enabling tx queues */
6053                 ret = i40e_dev_switch_rx_queues(pf, on);
6054                 if (ret) {
6055                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
6056                         return ret;
6057                 }
6058                 ret = i40e_dev_switch_tx_queues(pf, on);
6059         } else {
6060                 /* Stop tx queues before stopping rx queues */
6061                 ret = i40e_dev_switch_tx_queues(pf, on);
6062                 if (ret) {
6063                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
6064                         return ret;
6065                 }
6066                 ret = i40e_dev_switch_rx_queues(pf, on);
6067         }
6068
6069         return ret;
6070 }
6071
6072 /* Initialize VSI for TX */
6073 static int
6074 i40e_dev_tx_init(struct i40e_pf *pf)
6075 {
6076         struct rte_eth_dev_data *data = pf->dev_data;
6077         uint16_t i;
6078         uint32_t ret = I40E_SUCCESS;
6079         struct i40e_tx_queue *txq;
6080
6081         for (i = 0; i < data->nb_tx_queues; i++) {
6082                 txq = data->tx_queues[i];
6083                 if (!txq || !txq->q_set)
6084                         continue;
6085                 ret = i40e_tx_queue_init(txq);
6086                 if (ret != I40E_SUCCESS)
6087                         break;
6088         }
6089         if (ret == I40E_SUCCESS)
6090                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6091                                      ->eth_dev);
6092
6093         return ret;
6094 }
6095
6096 /* Initialize VSI for RX */
6097 static int
6098 i40e_dev_rx_init(struct i40e_pf *pf)
6099 {
6100         struct rte_eth_dev_data *data = pf->dev_data;
6101         int ret = I40E_SUCCESS;
6102         uint16_t i;
6103         struct i40e_rx_queue *rxq;
6104
6105         i40e_pf_config_mq_rx(pf);
6106         for (i = 0; i < data->nb_rx_queues; i++) {
6107                 rxq = data->rx_queues[i];
6108                 if (!rxq || !rxq->q_set)
6109                         continue;
6110
6111                 ret = i40e_rx_queue_init(rxq);
6112                 if (ret != I40E_SUCCESS) {
6113                         PMD_DRV_LOG(ERR,
6114                                 "Failed to do RX queue initialization");
6115                         break;
6116                 }
6117         }
6118         if (ret == I40E_SUCCESS)
6119                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6120                                      ->eth_dev);
6121
6122         return ret;
6123 }
6124
6125 static int
6126 i40e_dev_rxtx_init(struct i40e_pf *pf)
6127 {
6128         int err;
6129
6130         err = i40e_dev_tx_init(pf);
6131         if (err) {
6132                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6133                 return err;
6134         }
6135         err = i40e_dev_rx_init(pf);
6136         if (err) {
6137                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6138                 return err;
6139         }
6140
6141         return err;
6142 }
6143
6144 static int
6145 i40e_vmdq_setup(struct rte_eth_dev *dev)
6146 {
6147         struct rte_eth_conf *conf = &dev->data->dev_conf;
6148         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6149         int i, err, conf_vsis, j, loop;
6150         struct i40e_vsi *vsi;
6151         struct i40e_vmdq_info *vmdq_info;
6152         struct rte_eth_vmdq_rx_conf *vmdq_conf;
6153         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6154
6155         /*
6156          * Disable interrupts to avoid messages from VFs. This also
6157          * avoids race conditions during VSI creation/destruction.
6158          */
6159         i40e_pf_disable_irq0(hw);
6160
6161         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6162                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6163                 return -ENOTSUP;
6164         }
6165
6166         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6167         if (conf_vsis > pf->max_nb_vmdq_vsi) {
6168                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6169                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6170                         pf->max_nb_vmdq_vsi);
6171                 return -ENOTSUP;
6172         }
6173
6174         if (pf->vmdq != NULL) {
6175                 PMD_INIT_LOG(INFO, "VMDQ already configured");
6176                 return 0;
6177         }
6178
6179         pf->vmdq = rte_zmalloc("vmdq_info_struct",
6180                                 sizeof(*vmdq_info) * conf_vsis, 0);
6181
6182         if (pf->vmdq == NULL) {
6183                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6184                 return -ENOMEM;
6185         }
6186
6187         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6188
6189         /* Create VMDQ VSI */
6190         for (i = 0; i < conf_vsis; i++) {
6191                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6192                                 vmdq_conf->enable_loop_back);
6193                 if (vsi == NULL) {
6194                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6195                         err = -1;
6196                         goto err_vsi_setup;
6197                 }
6198                 vmdq_info = &pf->vmdq[i];
6199                 vmdq_info->pf = pf;
6200                 vmdq_info->vsi = vsi;
6201         }
6202         pf->nb_cfg_vmdq_vsi = conf_vsis;
6203
6204         /* Configure Vlan */
6205         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6206         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6207                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6208                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6209                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6210                                         vmdq_conf->pool_map[i].vlan_id, j);
6211
6212                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6213                                                 vmdq_conf->pool_map[i].vlan_id);
6214                                 if (err) {
6215                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
6216                                         err = -1;
6217                                         goto err_vsi_setup;
6218                                 }
6219                         }
6220                 }
6221         }
6222
6223         i40e_pf_enable_irq0(hw);
6224
6225         return 0;
6226
6227 err_vsi_setup:
6228         for (i = 0; i < conf_vsis; i++) {
6229                 if (pf->vmdq[i].vsi == NULL)
6230                         break;
6231                 i40e_vsi_release(pf->vmdq[i].vsi);
6232         }
6233
6234         rte_free(pf->vmdq);
6235         pf->vmdq = NULL;
6236         i40e_pf_enable_irq0(hw);
6237         return err;
6238 }
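/*
 * Example for the VLAN/pool mapping above (illustrative values): with
 * vmdq_conf->pool_map[i].vlan_id = 100 and .pools = 0x5, bits 0 and 2
 * of the 64-bit pool bitmask are set, so VLAN 100 is added to VMDQ
 * VSIs 0 and 2.
 */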
6239
6240 static void
6241 i40e_stat_update_32(struct i40e_hw *hw,
6242                    uint32_t reg,
6243                    bool offset_loaded,
6244                    uint64_t *offset,
6245                    uint64_t *stat)
6246 {
6247         uint64_t new_data;
6248
6249         new_data = (uint64_t)I40E_READ_REG(hw, reg);
6250         if (!offset_loaded)
6251                 *offset = new_data;
6252
6253         if (new_data >= *offset)
6254                 *stat = (uint64_t)(new_data - *offset);
6255         else
6256                 *stat = (uint64_t)((new_data +
6257                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6258 }
6259
6260 static void
6261 i40e_stat_update_48(struct i40e_hw *hw,
6262                    uint32_t hireg,
6263                    uint32_t loreg,
6264                    bool offset_loaded,
6265                    uint64_t *offset,
6266                    uint64_t *stat)
6267 {
6268         uint64_t new_data;
6269
6270         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6271         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6272                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6273
6274         if (!offset_loaded)
6275                 *offset = new_data;
6276
6277         if (new_data >= *offset)
6278                 *stat = new_data - *offset;
6279         else
6280                 *stat = (uint64_t)((new_data +
6281                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6282
6283         *stat &= I40E_48_BIT_MASK;
6284 }
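/*
 * Worked rollover example for the 48-bit variant above (the 32-bit
 * version applies the same logic with a 2^32 modulus). With an
 * assumed *offset = 0xFFFFFFFFFFF0 and a wrapped reading
 * new_data = 0x10:
 *
 *     *stat = (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20;
 *
 * i.e. 0x20 units were counted across the wrap.
 */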
6285
6286 /* Disable IRQ0 */
6287 void
6288 i40e_pf_disable_irq0(struct i40e_hw *hw)
6289 {
6290         /* Disable all interrupt types */
6291         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6292                        I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6293         I40E_WRITE_FLUSH(hw);
6294 }
6295
6296 /* Enable IRQ0 */
6297 void
6298 i40e_pf_enable_irq0(struct i40e_hw *hw)
6299 {
6300         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6301                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6302                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6303                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6304         I40E_WRITE_FLUSH(hw);
6305 }
6306
6307 static void
6308 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6309 {
6310         /* read pending request and disable first */
6311         i40e_pf_disable_irq0(hw);
6312         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6313         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6314                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6315
6316         if (no_queue)
6317                 /* Link no queues with irq0 */
6318                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6319                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6320 }
6321
6322 static void
6323 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6324 {
6325         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6326         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6327         int i;
6328         uint16_t abs_vf_id;
6329         uint32_t index, offset, val;
6330
6331         if (!pf->vfs)
6332                 return;
6333         /**
6334          * Try to find which VF triggered a reset; use the absolute VF
6335          * id for access, since the register is a global one.
6336          */
6337         for (i = 0; i < pf->vf_num; i++) {
6338                 abs_vf_id = hw->func_caps.vf_base_id + i;
6339                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6340                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6341                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6342                 /* VFR event occurred */
6343                 if (val & (0x1 << offset)) {
6344                         int ret;
6345
6346                         /* Clear the event first */
6347                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6348                                                         (0x1 << offset));
6349                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6350                         /**
6351                          * Only notify the VF that a reset event occurred;
6352                          * don't trigger another SW reset.
6353                          */
6354                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6355                         if (ret != I40E_SUCCESS)
6356                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6357                 }
6358         }
6359 }
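/*
 * Example of the VFLRSTAT indexing above (illustrative): assuming
 * I40E_UINT32_BIT_SIZE is 32 and abs_vf_id = 37, then
 * index = 37 / 32 = 1 and offset = 37 % 32 = 5, so that VF's VFR
 * event is bit 5 of I40E_GLGEN_VFLRSTAT(1).
 */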
6360
6361 static void
6362 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6363 {
6364         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6365         int i;
6366
6367         for (i = 0; i < pf->vf_num; i++)
6368                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6369 }
6370
6371 static void
6372 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6373 {
6374         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6375         struct i40e_arq_event_info info;
6376         uint16_t pending, opcode;
6377         int ret;
6378
6379         info.buf_len = I40E_AQ_BUF_SZ;
6380         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6381         if (!info.msg_buf) {
6382                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6383                 return;
6384         }
6385
6386         pending = 1;
6387         while (pending) {
6388                 ret = i40e_clean_arq_element(hw, &info, &pending);
6389
6390                 if (ret != I40E_SUCCESS) {
6391                         PMD_DRV_LOG(INFO,
6392                                 "Failed to read msg from AdminQ, aq_err: %u",
6393                                 hw->aq.asq_last_status);
6394                         break;
6395                 }
6396                 opcode = rte_le_to_cpu_16(info.desc.opcode);
6397
6398                 switch (opcode) {
6399                 case i40e_aqc_opc_send_msg_to_pf:
6400                         /* Refer to i40e_aq_send_msg_to_pf() for the argument layout */
6401                         i40e_pf_host_handle_vf_msg(dev,
6402                                         rte_le_to_cpu_16(info.desc.retval),
6403                                         rte_le_to_cpu_32(info.desc.cookie_high),
6404                                         rte_le_to_cpu_32(info.desc.cookie_low),
6405                                         info.msg_buf,
6406                                         info.msg_len);
6407                         break;
6408                 case i40e_aqc_opc_get_link_status:
6409                         ret = i40e_dev_link_update(dev, 0);
6410                         if (!ret)
6411                                 _rte_eth_dev_callback_process(dev,
6412                                         RTE_ETH_EVENT_INTR_LSC, NULL);
6413                         break;
6414                 default:
6415                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6416                                     opcode);
6417                         break;
6418                 }
6419         }
6420         rte_free(info.msg_buf);
6421 }
6422
6423 /**
6424  * Interrupt handler triggered by the NIC for handling
6425  * specific interrupts.
6426  *
6427  * @param param
6428  *  The address of the parameter (struct rte_eth_dev *) registered before.
6429  *
6430  * @return
6431  *  void
6432  */
6435 static void
6436 i40e_dev_interrupt_handler(void *param)
6437 {
6438         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6439         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6440         uint32_t icr0;
6441
6442         /* Disable interrupt */
6443         i40e_pf_disable_irq0(hw);
6444
6445         /* read out interrupt causes */
6446         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6447
6448         /* No interrupt event indicated */
6449         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6450                 PMD_DRV_LOG(INFO, "No interrupt event");
6451                 goto done;
6452         }
6453         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6454                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6455         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6456                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6457         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6458                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6459         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6460                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6461         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6462                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6463         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6464                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6465         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6466                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6467
6468         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6469                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6470                 i40e_dev_handle_vfr_event(dev);
6471         }
6472         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6473                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6474                 i40e_dev_handle_aq_msg(dev);
6475         }
6476
6477 done:
6478         /* Enable interrupt */
6479         i40e_pf_enable_irq0(hw);
6480         rte_intr_enable(dev->intr_handle);
6481 }
6482
6483 int
6484 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6485                          struct i40e_macvlan_filter *filter,
6486                          int total)
6487 {
6488         int ele_num, ele_buff_size;
6489         int num, actual_num, i;
6490         uint16_t flags;
6491         int ret = I40E_SUCCESS;
6492         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6493         struct i40e_aqc_add_macvlan_element_data *req_list;
6494
6495         if (filter == NULL || total == 0)
6496                 return I40E_ERR_PARAM;
6497         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6498         ele_buff_size = hw->aq.asq_buf_size;
6499
6500         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6501         if (req_list == NULL) {
6502                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6503                 return I40E_ERR_NO_MEMORY;
6504         }
6505
6506         num = 0;
6507         do {
6508                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6509                 memset(req_list, 0, ele_buff_size);
6510
6511                 for (i = 0; i < actual_num; i++) {
6512                         rte_memcpy(req_list[i].mac_addr,
6513                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6514                         req_list[i].vlan_tag =
6515                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6516
6517                         switch (filter[num + i].filter_type) {
6518                         case RTE_MAC_PERFECT_MATCH:
6519                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6520                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6521                                 break;
6522                         case RTE_MACVLAN_PERFECT_MATCH:
6523                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6524                                 break;
6525                         case RTE_MAC_HASH_MATCH:
6526                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6527                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6528                                 break;
6529                         case RTE_MACVLAN_HASH_MATCH:
6530                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6531                                 break;
6532                         default:
6533                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6534                                 ret = I40E_ERR_PARAM;
6535                                 goto DONE;
6536                         }
6537
6538                         req_list[i].queue_number = 0;
6539
6540                         req_list[i].flags = rte_cpu_to_le_16(flags);
6541                 }
6542
6543                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6544                                                 actual_num, NULL);
6545                 if (ret != I40E_SUCCESS) {
6546                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6547                         goto DONE;
6548                 }
6549                 num += actual_num;
6550         } while (num < total);
6551
6552 DONE:
6553         rte_free(req_list);
6554         return ret;
6555 }
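
/*
 * Caller sketch (hypothetical addresses mac0/mac1): build an array of
 * i40e_macvlan_filter entries and let the helper above split it into
 * admin-queue sized chunks of asq_buf_size / sizeof(element) entries
 * per i40e_aq_add_macvlan() call.
 *
 *     struct i40e_macvlan_filter mv[2];
 *
 *     memset(mv, 0, sizeof(mv));
 *     rte_memcpy(&mv[0].macaddr, &mac0, ETH_ADDR_LEN);
 *     mv[0].vlan_id = 100;
 *     mv[0].filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *     rte_memcpy(&mv[1].macaddr, &mac1, ETH_ADDR_LEN);
 *     mv[1].vlan_id = 100;
 *     mv[1].filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *     ret = i40e_add_macvlan_filters(vsi, mv, 2);
 */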
6556
6557 int
6558 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6559                             struct i40e_macvlan_filter *filter,
6560                             int total)
6561 {
6562         int ele_num, ele_buff_size;
6563         int num, actual_num, i;
6564         uint16_t flags;
6565         int ret = I40E_SUCCESS;
6566         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6567         struct i40e_aqc_remove_macvlan_element_data *req_list;
6568
6569         if (filter == NULL || total == 0)
6570                 return I40E_ERR_PARAM;
6571
6572         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6573         ele_buff_size = hw->aq.asq_buf_size;
6574
6575         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6576         if (req_list == NULL) {
6577                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6578                 return I40E_ERR_NO_MEMORY;
6579         }
6580
6581         num = 0;
6582         do {
6583                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6584                 memset(req_list, 0, ele_buff_size);
6585
6586                 for (i = 0; i < actual_num; i++) {
6587                         rte_memcpy(req_list[i].mac_addr,
6588                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6589                         req_list[i].vlan_tag =
6590                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6591
6592                         switch (filter[num + i].filter_type) {
6593                         case RTE_MAC_PERFECT_MATCH:
6594                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6595                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6596                                 break;
6597                         case RTE_MACVLAN_PERFECT_MATCH:
6598                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6599                                 break;
6600                         case RTE_MAC_HASH_MATCH:
6601                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6602                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6603                                 break;
6604                         case RTE_MACVLAN_HASH_MATCH:
6605                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6606                                 break;
6607                         default:
6608                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6609                                 ret = I40E_ERR_PARAM;
6610                                 goto DONE;
6611                         }
6612                         req_list[i].flags = rte_cpu_to_le_16(flags);
6613                 }
6614
6615                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6616                                                 actual_num, NULL);
6617                 if (ret != I40E_SUCCESS) {
6618                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6619                         goto DONE;
6620                 }
6621                 num += actual_num;
6622         } while (num < total);
6623
6624 DONE:
6625         rte_free(req_list);
6626         return ret;
6627 }
6628
6629 /* Find out specific MAC filter */
6630 static struct i40e_mac_filter *
6631 i40e_find_mac_filter(struct i40e_vsi *vsi,
6632                          struct ether_addr *macaddr)
6633 {
6634         struct i40e_mac_filter *f;
6635
6636         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6637                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6638                         return f;
6639         }
6640
6641         return NULL;
6642 }
6643
6644 static bool
6645 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6646                          uint16_t vlan_id)
6647 {
6648         uint32_t vid_idx, vid_bit;
6649
6650         if (vlan_id > ETH_VLAN_ID_MAX)
6651                 return 0;
6652
6653         vid_idx = I40E_VFTA_IDX(vlan_id);
6654         vid_bit = I40E_VFTA_BIT(vlan_id);
6655
6656         if (vsi->vfta[vid_idx] & vid_bit)
6657                 return 1;
6658         else
6659                 return 0;
6660 }
6661
6662 static void
6663 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6664                        uint16_t vlan_id, bool on)
6665 {
6666         uint32_t vid_idx, vid_bit;
6667
6668         vid_idx = I40E_VFTA_IDX(vlan_id);
6669         vid_bit = I40E_VFTA_BIT(vlan_id);
6670
6671         if (on)
6672                 vsi->vfta[vid_idx] |= vid_bit;
6673         else
6674                 vsi->vfta[vid_idx] &= ~vid_bit;
6675 }
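
/*
 * Illustration (assuming the conventional I40E_VFTA_IDX(id) = (id >> 5)
 * and I40E_VFTA_BIT(id) = 1 << (id & 0x1F) bitmap layout): storing
 * VLAN 100 sets bit 4 of word 3 of the VFTA shadow table, i.e.
 *
 *     vsi->vfta[100 >> 5] |= 1 << (100 & 0x1F);    becomes vfta[3] |= 0x10
 */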
6676
6677 void
6678 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6679                      uint16_t vlan_id, bool on)
6680 {
6681         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6682         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6683         int ret;
6684
6685         if (vlan_id > ETH_VLAN_ID_MAX)
6686                 return;
6687
6688         i40e_store_vlan_filter(vsi, vlan_id, on);
6689
6690         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6691                 return;
6692
6693         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6694
6695         if (on) {
6696                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6697                                        &vlan_data, 1, NULL);
6698                 if (ret != I40E_SUCCESS)
6699                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6700         } else {
6701                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6702                                           &vlan_data, 1, NULL);
6703                 if (ret != I40E_SUCCESS)
6704                         PMD_DRV_LOG(ERR,
6705                                     "Failed to remove vlan filter");
6706         }
6707 }
6708
6709 /**
6710  * Find all VLAN options for a specific MAC address,
6711  * and return the actual VLANs found.
6712  */
6713 int
6714 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6715                            struct i40e_macvlan_filter *mv_f,
6716                            int num, struct ether_addr *addr)
6717 {
6718         int i;
6719         uint32_t j, k;
6720
6721         /**
6722          * Avoid using i40e_find_vlan_filter() here to reduce the loop
6723          * time, although it makes the code look more complex.
6724          */
6725         if (num < vsi->vlan_num)
6726                 return I40E_ERR_PARAM;
6727
6728         i = 0;
6729         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6730                 if (vsi->vfta[j]) {
6731                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6732                                 if (vsi->vfta[j] & (1 << k)) {
6733                                         if (i > num - 1) {
6734                                                 PMD_DRV_LOG(ERR,
6735                                                         "vlan number doesn't match");
6736                                                 return I40E_ERR_PARAM;
6737                                         }
6738                                         rte_memcpy(&mv_f[i].macaddr,
6739                                                         addr, ETH_ADDR_LEN);
6740                                         mv_f[i].vlan_id =
6741                                                 j * I40E_UINT32_BIT_SIZE + k;
6742                                         i++;
6743                                 }
6744                         }
6745                 }
6746         }
6747         return I40E_SUCCESS;
6748 }
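
/*
 * Example (illustrative): with VLANs 10 and 20 set in the VFTA, the
 * helper above expands a single MAC address into two (MAC, VLAN) pairs,
 * assuming "addr" holds the address:
 *
 *     struct i40e_macvlan_filter mv[2];
 *
 *     if (i40e_find_all_vlan_for_mac(vsi, mv, 2, &addr) == I40E_SUCCESS) {
 *             // mv[0].vlan_id == 10, mv[1].vlan_id == 20
 *     }
 */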
6749
6750 static inline int
6751 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6752                            struct i40e_macvlan_filter *mv_f,
6753                            int num,
6754                            uint16_t vlan)
6755 {
6756         int i = 0;
6757         struct i40e_mac_filter *f;
6758
6759         if (num < vsi->mac_num)
6760                 return I40E_ERR_PARAM;
6761
6762         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6763                 if (i > num - 1) {
6764                         PMD_DRV_LOG(ERR, "buffer number not match");
6765                         return I40E_ERR_PARAM;
6766                 }
6767                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6768                                 ETH_ADDR_LEN);
6769                 mv_f[i].vlan_id = vlan;
6770                 mv_f[i].filter_type = f->mac_info.filter_type;
6771                 i++;
6772         }
6773
6774         return I40E_SUCCESS;
6775 }
6776
6777 static int
6778 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6779 {
6780         int i, j, num;
6781         struct i40e_mac_filter *f;
6782         struct i40e_macvlan_filter *mv_f;
6783         int ret = I40E_SUCCESS;
6784
6785         if (vsi == NULL || vsi->mac_num == 0)
6786                 return I40E_ERR_PARAM;
6787
6788         /* Case where no VLAN is set */
6789         if (vsi->vlan_num == 0)
6790                 num = vsi->mac_num;
6791         else
6792                 num = vsi->mac_num * vsi->vlan_num;
6793
6794         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6795         if (mv_f == NULL) {
6796                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6797                 return I40E_ERR_NO_MEMORY;
6798         }
6799
6800         i = 0;
6801         if (vsi->vlan_num == 0) {
6802                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6803                         rte_memcpy(&mv_f[i].macaddr,
6804                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6805                         mv_f[i].filter_type = f->mac_info.filter_type;
6806                         mv_f[i].vlan_id = 0;
6807                         i++;
6808                 }
6809         } else {
6810                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6811                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6812                                         vsi->vlan_num, &f->mac_info.mac_addr);
6813                         if (ret != I40E_SUCCESS)
6814                                 goto DONE;
6815                         for (j = i; j < i + vsi->vlan_num; j++)
6816                                 mv_f[j].filter_type = f->mac_info.filter_type;
6817                         i += vsi->vlan_num;
6818                 }
6819         }
6820
6821         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6822 DONE:
6823         rte_free(mv_f);
6824
6825         return ret;
6826 }
6827
6828 int
6829 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6830 {
6831         struct i40e_macvlan_filter *mv_f;
6832         int mac_num;
6833         int ret = I40E_SUCCESS;
6834
6835         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6836                 return I40E_ERR_PARAM;
6837
6838         /* If it's already set, just return */
6839         if (i40e_find_vlan_filter(vsi, vlan))
6840                 return I40E_SUCCESS;
6841
6842         mac_num = vsi->mac_num;
6843
6844         if (mac_num == 0) {
6845                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6846                 return I40E_ERR_PARAM;
6847         }
6848
6849         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6850
6851         if (mv_f == NULL) {
6852                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6853                 return I40E_ERR_NO_MEMORY;
6854         }
6855
6856         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6857
6858         if (ret != I40E_SUCCESS)
6859                 goto DONE;
6860
6861         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6862
6863         if (ret != I40E_SUCCESS)
6864                 goto DONE;
6865
6866         i40e_set_vlan_filter(vsi, vlan, 1);
6867
6868         vsi->vlan_num++;
6869         ret = I40E_SUCCESS;
6870 DONE:
6871         rte_free(mv_f);
6872         return ret;
6873 }
6874
6875 int
6876 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6877 {
6878         struct i40e_macvlan_filter *mv_f;
6879         int mac_num;
6880         int ret = I40E_SUCCESS;
6881
6882         /**
6883          * Vlan 0 is the generic filter for untagged packets
6884          * and can't be removed.
6885          */
6886         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6887                 return I40E_ERR_PARAM;
6888
6889         /* If we can't find it, just return */
6890         if (!i40e_find_vlan_filter(vsi, vlan))
6891                 return I40E_ERR_PARAM;
6892
6893         mac_num = vsi->mac_num;
6894
6895         if (mac_num == 0) {
6896                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6897                 return I40E_ERR_PARAM;
6898         }
6899
6900         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6901
6902         if (mv_f == NULL) {
6903                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6904                 return I40E_ERR_NO_MEMORY;
6905         }
6906
6907         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6908
6909         if (ret != I40E_SUCCESS)
6910                 goto DONE;
6911
6912         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6913
6914         if (ret != I40E_SUCCESS)
6915                 goto DONE;
6916
6917         /* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
6918         if (vsi->vlan_num == 1) {
6919                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6920                 if (ret != I40E_SUCCESS)
6921                         goto DONE;
6922
6923                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6924                 if (ret != I40E_SUCCESS)
6925                         goto DONE;
6926         }
6927
6928         i40e_set_vlan_filter(vsi, vlan, 0);
6929
6930         vsi->vlan_num--;
6931         ret = I40E_SUCCESS;
6932 DONE:
6933         rte_free(mv_f);
6934         return ret;
6935 }
6936
6937 int
6938 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6939 {
6940         struct i40e_mac_filter *f;
6941         struct i40e_macvlan_filter *mv_f;
6942         int i, vlan_num = 0;
6943         int ret = I40E_SUCCESS;
6944
6945         /* If it's an add and we've already configured it, return */
6946         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6947         if (f != NULL)
6948                 return I40E_SUCCESS;
6949         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6950                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6951
6952                 /**
6953                  * If vlan_num is 0, this is the first time to add a MAC;
6954                  * set the mask for vlan_id 0.
6955                  */
6956                 if (vsi->vlan_num == 0) {
6957                         i40e_set_vlan_filter(vsi, 0, 1);
6958                         vsi->vlan_num = 1;
6959                 }
6960                 vlan_num = vsi->vlan_num;
6961         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6962                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6963                 vlan_num = 1;
6964
6965         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6966         if (mv_f == NULL) {
6967                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6968                 return I40E_ERR_NO_MEMORY;
6969         }
6970
6971         for (i = 0; i < vlan_num; i++) {
6972                 mv_f[i].filter_type = mac_filter->filter_type;
6973                 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6974                                 ETH_ADDR_LEN);
6975         }
6976
6977         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6978                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6979                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6980                                         &mac_filter->mac_addr);
6981                 if (ret != I40E_SUCCESS)
6982                         goto DONE;
6983         }
6984
6985         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6986         if (ret != I40E_SUCCESS)
6987                 goto DONE;
6988
6989         /* Add the mac addr into mac list */
6990         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6991         if (f == NULL) {
6992                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6993                 ret = I40E_ERR_NO_MEMORY;
6994                 goto DONE;
6995         }
6996         rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6997                         ETH_ADDR_LEN);
6998         f->mac_info.filter_type = mac_filter->filter_type;
6999         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7000         vsi->mac_num++;
7001
7002         ret = I40E_SUCCESS;
7003 DONE:
7004         rte_free(mv_f);
7005
7006         return ret;
7007 }
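
/*
 * Usage sketch (hypothetical "new_addr"): register a perfect-match
 * MAC+VLAN filter; the helper above expands it across every VLAN
 * currently configured on the VSI.
 *
 *     struct i40e_mac_filter_info info;
 *
 *     memset(&info, 0, sizeof(info));
 *     rte_memcpy(&info.mac_addr, &new_addr, ETH_ADDR_LEN);
 *     info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
 *     if (i40e_vsi_add_mac(vsi, &info) != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "Failed to add MAC filter");
 */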
7008
7009 int
7010 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
7011 {
7012         struct i40e_mac_filter *f;
7013         struct i40e_macvlan_filter *mv_f;
7014         int i, vlan_num;
7015         enum rte_mac_filter_type filter_type;
7016         int ret = I40E_SUCCESS;
7017
7018         /* Can't find it, return an error */
7019         f = i40e_find_mac_filter(vsi, addr);
7020         if (f == NULL)
7021                 return I40E_ERR_PARAM;
7022
7023         vlan_num = vsi->vlan_num;
7024         filter_type = f->mac_info.filter_type;
7025         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7026                 filter_type == RTE_MACVLAN_HASH_MATCH) {
7027                 if (vlan_num == 0) {
7028                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7029                         return I40E_ERR_PARAM;
7030                 }
7031         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7032                         filter_type == RTE_MAC_HASH_MATCH)
7033                 vlan_num = 1;
7034
7035         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7036         if (mv_f == NULL) {
7037                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7038                 return I40E_ERR_NO_MEMORY;
7039         }
7040
7041         for (i = 0; i < vlan_num; i++) {
7042                 mv_f[i].filter_type = filter_type;
7043                 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7044                                 ETH_ADDR_LEN);
7045         }
7046         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7047                         filter_type == RTE_MACVLAN_HASH_MATCH) {
7048                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7049                 if (ret != I40E_SUCCESS)
7050                         goto DONE;
7051         }
7052
7053         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7054         if (ret != I40E_SUCCESS)
7055                 goto DONE;
7056
7057         /* Remove the mac addr from the mac list */
7058         TAILQ_REMOVE(&vsi->mac_list, f, next);
7059         rte_free(f);
7060         vsi->mac_num--;
7061
7062         ret = I40E_SUCCESS;
7063 DONE:
7064         rte_free(mv_f);
7065         return ret;
7066 }
7067
7068 /* Configure hash enable flags for RSS */
7069 uint64_t
7070 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7071 {
7072         uint64_t hena = 0;
7073         int i;
7074
7075         if (!flags)
7076                 return hena;
7077
7078         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7079                 if (flags & (1ULL << i))
7080                         hena |= adapter->pctypes_tbl[i];
7081         }
7082
7083         return hena;
7084 }
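
/*
 * Example (illustrative): rss_hf flags are flow-type bit positions, so
 * ETH_RSS_* masks can be passed directly; each set flow type ORs its
 * PCTYPE bits from pctypes_tbl into the returned HENA value:
 *
 *     uint64_t hena = i40e_config_hena(pf->adapter,
 *                                      ETH_RSS_NONFRAG_IPV4_TCP |
 *                                      ETH_RSS_NONFRAG_IPV4_UDP);
 */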
7085
7086 /* Parse the hash enable flags */
7087 uint64_t
7088 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7089 {
7090         uint64_t rss_hf = 0;
7091         int i;
7092
7093         if (!flags)
7094                 return rss_hf;
7095
7096         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7097                 if (flags & adapter->pctypes_tbl[i])
7098                         rss_hf |= (1ULL << i);
7099         }
7100         return rss_hf;
7101 }
7102
7103 /* Disable RSS */
7104 static void
7105 i40e_pf_disable_rss(struct i40e_pf *pf)
7106 {
7107         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7108
7109         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7110         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7111         I40E_WRITE_FLUSH(hw);
7112 }
7113
7114 int
7115 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7116 {
7117         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7118         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7119         uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7120                            I40E_VFQF_HKEY_MAX_INDEX :
7121                            I40E_PFQF_HKEY_MAX_INDEX;
7122         int ret = 0;
7123
7124         if (!key || key_len == 0) {
7125                 PMD_DRV_LOG(DEBUG, "No key to be configured");
7126                 return 0;
7127         } else if (key_len != (key_idx + 1) *
7128                 sizeof(uint32_t)) {
7129                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7130                 return -EINVAL;
7131         }
7132
7133         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7134                 struct i40e_aqc_get_set_rss_key_data *key_dw =
7135                         (struct i40e_aqc_get_set_rss_key_data *)key;
7136
7137                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7138                 if (ret)
7139                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7140         } else {
7141                 uint32_t *hash_key = (uint32_t *)key;
7142                 uint16_t i;
7143
7144                 if (vsi->type == I40E_VSI_SRIOV) {
7145                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7146                                 I40E_WRITE_REG(
7147                                         hw,
7148                                         I40E_VFQF_HKEY1(i, vsi->user_param),
7149                                         hash_key[i]);
7150
7151                 } else {
7152                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7153                                 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7154                                                hash_key[i]);
7155                 }
7156                 I40E_WRITE_FLUSH(hw);
7157         }
7158
7159         return ret;
7160 }
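
/*
 * Usage sketch (hypothetical key material): for a PF VSI the key must be
 * exactly (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes, i.e.
 * four bytes per HKEY register word:
 *
 *     uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
 *
 *     fill_rss_key(key, sizeof(key));        hypothetical key generator
 *     ret = i40e_set_rss_key(pf->main_vsi, key, sizeof(key));
 */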
7161
7162 static int
7163 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7164 {
7165         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7166         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7167         uint32_t reg;
7168         int ret;
7169
7170         if (!key || !key_len)
7171                 return -EINVAL;
7172
7173         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7174                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7175                         (struct i40e_aqc_get_set_rss_key_data *)key);
7176                 if (ret) {
7177                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7178                         return ret;
7179                 }
7180         } else {
7181                 uint32_t *key_dw = (uint32_t *)key;
7182                 uint16_t i;
7183
7184                 if (vsi->type == I40E_VSI_SRIOV) {
7185                         for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7186                                 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7187                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7188                         }
7189                         *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7190                                    sizeof(uint32_t);
7191                 } else {
7192                         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7193                                 reg = I40E_PFQF_HKEY(i);
7194                                 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7195                         }
7196                         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7197                                    sizeof(uint32_t);
7198                 }
7199         }
7200         return 0;
7201 }
7202
7203 static int
7204 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7205 {
7206         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7207         uint64_t hena;
7208         int ret;
7209
7210         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7211                                rss_conf->rss_key_len);
7212         if (ret)
7213                 return ret;
7214
7215         hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7216         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7217         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7218         I40E_WRITE_FLUSH(hw);
7219
7220         return 0;
7221 }
7222
7223 static int
7224 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7225                          struct rte_eth_rss_conf *rss_conf)
7226 {
7227         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7228         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7229         uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7230         uint64_t hena;
7231
7232         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7233         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7234
7235         if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7236                 if (rss_hf != 0) /* Enable RSS */
7237                         return -EINVAL;
7238                 return 0; /* Nothing to do */
7239         }
7240         /* RSS enabled */
7241         if (rss_hf == 0) /* Disable RSS */
7242                 return -EINVAL;
7243
7244         return i40e_hw_rss_hash_set(pf, rss_conf);
7245 }
7246
7247 static int
7248 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7249                            struct rte_eth_rss_conf *rss_conf)
7250 {
7251         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7252         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7253         uint64_t hena;
7254
7255         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7256                          &rss_conf->rss_key_len);
7257
7258         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7259         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7260         rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7261
7262         return 0;
7263 }
7264
7265 static int
7266 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7267 {
7268         switch (filter_type) {
7269         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7270                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7271                 break;
7272         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7273                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7274                 break;
7275         case RTE_TUNNEL_FILTER_IMAC_TENID:
7276                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7277                 break;
7278         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7279                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7280                 break;
7281         case ETH_TUNNEL_FILTER_IMAC:
7282                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7283                 break;
7284         case ETH_TUNNEL_FILTER_OIP:
7285                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7286                 break;
7287         case ETH_TUNNEL_FILTER_IIP:
7288                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7289                 break;
7290         default:
7291                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7292                 return -EINVAL;
7293         }
7294
7295         return 0;
7296 }
7297
7298 /* Convert tunnel filter structure */
7299 static int
7300 i40e_tunnel_filter_convert(
7301         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7302         struct i40e_tunnel_filter *tunnel_filter)
7303 {
7304         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7305                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
7306         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7307                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
7308         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7309         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7310              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7311             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7312                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7313         else
7314                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7315         tunnel_filter->input.flags = cld_filter->element.flags;
7316         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7317         tunnel_filter->queue = cld_filter->element.queue_number;
7318         rte_memcpy(tunnel_filter->input.general_fields,
7319                    cld_filter->general_fields,
7320                    sizeof(cld_filter->general_fields));
7321
7322         return 0;
7323 }
7324
7325 /* Check if the tunnel filter already exists */
7326 struct i40e_tunnel_filter *
7327 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7328                              const struct i40e_tunnel_filter_input *input)
7329 {
7330         int ret;
7331
7332         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7333         if (ret < 0)
7334                 return NULL;
7335
7336         return tunnel_rule->hash_map[ret];
7337 }
7338
7339 /* Add a tunnel filter into the SW list */
7340 static int
7341 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7342                              struct i40e_tunnel_filter *tunnel_filter)
7343 {
7344         struct i40e_tunnel_rule *rule = &pf->tunnel;
7345         int ret;
7346
7347         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7348         if (ret < 0) {
7349                 PMD_DRV_LOG(ERR,
7350                             "Failed to insert tunnel filter to hash table %d!",
7351                             ret);
7352                 return ret;
7353         }
7354         rule->hash_map[ret] = tunnel_filter;
7355
7356         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7357
7358         return 0;
7359 }
7360
7361 /* Delete a tunnel filter from the SW list */
7362 int
7363 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7364                           struct i40e_tunnel_filter_input *input)
7365 {
7366         struct i40e_tunnel_rule *rule = &pf->tunnel;
7367         struct i40e_tunnel_filter *tunnel_filter;
7368         int ret;
7369
7370         ret = rte_hash_del_key(rule->hash_table, input);
7371         if (ret < 0) {
7372                 PMD_DRV_LOG(ERR,
7373                             "Failed to delete tunnel filter to hash table %d!",
7374                             ret);
7375                 return ret;
7376         }
7377         tunnel_filter = rule->hash_map[ret];
7378         rule->hash_map[ret] = NULL;
7379
7380         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7381         rte_free(tunnel_filter);
7382
7383         return 0;
7384 }
7385
7386 int
7387 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7388                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
7389                         uint8_t add)
7390 {
7391         uint16_t ip_type;
7392         uint32_t ipv4_addr, ipv4_addr_le;
7393         uint8_t i, tun_type = 0;
7394         /* internal variable to convert ipv6 byte order */
7395         uint32_t convert_ipv6[4];
7396         int val, ret = 0;
7397         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7398         struct i40e_vsi *vsi = pf->main_vsi;
7399         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7400         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7401         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7402         struct i40e_tunnel_filter *tunnel, *node;
7403         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7404
7405         cld_filter = rte_zmalloc("tunnel_filter",
7406                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7407                          0);
7408
7409         if (cld_filter == NULL) {
7410                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7411                 return -ENOMEM;
7412         }
7413         pfilter = cld_filter;
7414
7415         ether_addr_copy(&tunnel_filter->outer_mac,
7416                         (struct ether_addr *)&pfilter->element.outer_mac);
7417         ether_addr_copy(&tunnel_filter->inner_mac,
7418                         (struct ether_addr *)&pfilter->element.inner_mac);
7419
7420         pfilter->element.inner_vlan =
7421                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7422         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7423                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7424                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7425                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7426                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7427                                 &ipv4_addr_le,
7428                                 sizeof(pfilter->element.ipaddr.v4.data));
7429         } else {
7430                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7431                 for (i = 0; i < 4; i++) {
7432                         convert_ipv6[i] =
7433                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7434                 }
7435                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7436                            &convert_ipv6,
7437                            sizeof(pfilter->element.ipaddr.v6.data));
7438         }
7439
7440         /* check tunnel type */
7441         switch (tunnel_filter->tunnel_type) {
7442         case RTE_TUNNEL_TYPE_VXLAN:
7443                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7444                 break;
7445         case RTE_TUNNEL_TYPE_NVGRE:
7446                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7447                 break;
7448         case RTE_TUNNEL_TYPE_IP_IN_GRE:
7449                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7450                 break;
7451         default:
7452                 /* Other tunnel types are not supported. */
7453                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7454                 rte_free(cld_filter);
7455                 return -EINVAL;
7456         }
7457
7458         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7459                                        &pfilter->element.flags);
7460         if (val < 0) {
7461                 rte_free(cld_filter);
7462                 return -EINVAL;
7463         }
7464
7465         pfilter->element.flags |= rte_cpu_to_le_16(
7466                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7467                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7468         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7469         pfilter->element.queue_number =
7470                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7471
7472         /* Check if the filter is already in the SW list */
7473         memset(&check_filter, 0, sizeof(check_filter));
7474         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7475         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7476         if (add && node) {
7477                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7478                 rte_free(cld_filter);
7479                 return -EINVAL;
7480         }
7481
7482         if (!add && !node) {
7483                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7484                 rte_free(cld_filter);
7485                 return -EINVAL;
7486         }
7487
7488         if (add) {
7489                 ret = i40e_aq_add_cloud_filters(hw,
7490                                         vsi->seid, &cld_filter->element, 1);
7491                 if (ret < 0) {
7492                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7493                         rte_free(cld_filter);
7494                         return -ENOTSUP;
7495                 }
7496                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7497                 if (tunnel == NULL) {
7498                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7499                         rte_free(cld_filter);
7500                         return -ENOMEM;
7501                 }
7502
7503                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7504                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7505                 if (ret < 0)
7506                         rte_free(tunnel);
7507         } else {
7508                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7509                                                    &cld_filter->element, 1);
7510                 if (ret < 0) {
7511                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7512                         rte_free(cld_filter);
7513                         return -ENOTSUP;
7514                 }
7515                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7516         }
7517
7518         rte_free(cld_filter);
7519         return ret;
7520 }
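
/*
 * Caller sketch (hypothetical values): steer VXLAN VNI 100 to Rx queue 4
 * with an inner-MAC + tenant-ID cloud filter, assuming "inner_mac" holds
 * the inner Ethernet address:
 *
 *     struct rte_eth_tunnel_filter_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
 *     conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID;
 *     conf.tenant_id = 100;
 *     conf.queue_id = 4;
 *     ether_addr_copy(&inner_mac, &conf.inner_mac);
 *     ret = i40e_dev_tunnel_filter_set(pf, &conf, 1);    1 == add
 */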
7521
7522 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7523 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
7524 #define I40E_TR_GENEVE_KEY_MASK                 0x8
7525 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
7526 #define I40E_TR_GRE_KEY_MASK                    0x400
7527 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
7528 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
7529
7530 static enum i40e_status_code
7531 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7532 {
7533         struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7534         struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7535         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7536         enum i40e_status_code status = I40E_SUCCESS;
7537
7538         if (pf->support_multi_driver) {
7539                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7540                 return I40E_NOT_SUPPORTED;
7541         }
7542
7543         memset(&filter_replace, 0,
7544                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7545         memset(&filter_replace_buf, 0,
7546                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7547
7548         /* create L1 filter */
7549         filter_replace.old_filter_type =
7550                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7551         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7552         filter_replace.tr_bit = 0;
7553
7554         /* Prepare the buffer, 3 entries */
7555         filter_replace_buf.data[0] =
7556                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7557         filter_replace_buf.data[0] |=
7558                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7559         filter_replace_buf.data[2] = 0xFF;
7560         filter_replace_buf.data[3] = 0xFF;
7561         filter_replace_buf.data[4] =
7562                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7563         filter_replace_buf.data[4] |=
7564                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7565         filter_replace_buf.data[7] = 0xF0;
7566         filter_replace_buf.data[8] =
7567                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7568         filter_replace_buf.data[8] |=
7569                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7570         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7571                 I40E_TR_GENEVE_KEY_MASK |
7572                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7573         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7574                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7575                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7576
7577         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7578                                                &filter_replace_buf);
7579         if (!status) {
7580                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7581                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7582                             "cloud l1 type is changed from 0x%x to 0x%x",
7583                             filter_replace.old_filter_type,
7584                             filter_replace.new_filter_type);
7585         }
7586         return status;
7587 }
7588
7589 static enum i40e_status_code
7590 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7591 {
7592         struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7593         struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7594         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7595         enum i40e_status_code status = I40E_SUCCESS;
7596
7597         if (pf->support_multi_driver) {
7598                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7599                 return I40E_NOT_SUPPORTED;
7600         }
7601
7602         /* For MPLSoUDP */
7603         memset(&filter_replace, 0,
7604                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7605         memset(&filter_replace_buf, 0,
7606                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7607         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7608                 I40E_AQC_MIRROR_CLOUD_FILTER;
7609         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7610         filter_replace.new_filter_type =
7611                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7612         /* Prepare the buffer, 2 entries */
7613         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7614         filter_replace_buf.data[0] |=
7615                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7616         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7617         filter_replace_buf.data[4] |=
7618                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7619         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7620                                                &filter_replace_buf);
7621         if (status < 0)
7622                 return status;
7623         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7624                     "cloud filter type is changed from 0x%x to 0x%x",
7625                     filter_replace.old_filter_type,
7626                     filter_replace.new_filter_type);
7627
7628         /* For MPLSoGRE */
7629         memset(&filter_replace, 0,
7630                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7631         memset(&filter_replace_buf, 0,
7632                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7633
7634         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7635                 I40E_AQC_MIRROR_CLOUD_FILTER;
7636         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7637         filter_replace.new_filter_type =
7638                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7639         /* Prepare the buffer, 2 entries */
7640         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7641         filter_replace_buf.data[0] |=
7642                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7643         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7644         filter_replace_buf.data[4] |=
7645                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7646
7647         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7648                                                &filter_replace_buf);
7649         if (!status) {
7650                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7651                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7652                             "cloud filter type is changed from 0x%x to 0x%x",
7653                             filter_replace.old_filter_type,
7654                             filter_replace.new_filter_type);
7655         }
7656         return status;
7657 }
7658
7659 static enum i40e_status_code
7660 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7661 {
7662         struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7663         struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7664         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7665         enum i40e_status_code status = I40E_SUCCESS;
7666
7667         if (pf->support_multi_driver) {
7668                 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7669                 return I40E_NOT_SUPPORTED;
7670         }
7671
7672         /* For GTP-C */
7673         memset(&filter_replace, 0,
7674                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7675         memset(&filter_replace_buf, 0,
7676                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7677         /* create L1 filter */
7678         filter_replace.old_filter_type =
7679                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7680         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7681         filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7682                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7683         /* Prepare the buffer, 2 entries */
7684         filter_replace_buf.data[0] =
7685                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7686         filter_replace_buf.data[0] |=
7687                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7688         filter_replace_buf.data[2] = 0xFF;
7689         filter_replace_buf.data[3] = 0xFF;
7690         filter_replace_buf.data[4] =
7691                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7692         filter_replace_buf.data[4] |=
7693                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7694         filter_replace_buf.data[6] = 0xFF;
7695         filter_replace_buf.data[7] = 0xFF;
7696         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7697                                                &filter_replace_buf);
7698         if (status < 0)
7699                 return status;
7700         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7701                     "cloud l1 type is changed from 0x%x to 0x%x",
7702                     filter_replace.old_filter_type,
7703                     filter_replace.new_filter_type);
7704
7705         /* for GTP-U */
7706         memset(&filter_replace, 0,
7707                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7708         memset(&filter_replace_buf, 0,
7709                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7710         /* create L1 filter */
7711         filter_replace.old_filter_type =
7712                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7713         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7714         filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7715                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7716         /* Prepare the buffer, 2 entries */
7717         filter_replace_buf.data[0] =
7718                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7719         filter_replace_buf.data[0] |=
7720                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7721         filter_replace_buf.data[2] = 0xFF;
7722         filter_replace_buf.data[3] = 0xFF;
7723         filter_replace_buf.data[4] =
7724                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7725         filter_replace_buf.data[4] |=
7726                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7727         filter_replace_buf.data[6] = 0xFF;
7728         filter_replace_buf.data[7] = 0xFF;
7729
7730         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7731                                                &filter_replace_buf);
7732         if (!status) {
7733                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7734                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7735                             "cloud l1 type is changed from 0x%x to 0x%x",
7736                             filter_replace.old_filter_type,
7737                             filter_replace.new_filter_type);
7738         }
7739         return status;
7740 }
7741
7742 static enum i40e_status_code
7743 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7744 {
7745         struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7746         struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7747         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7748         enum i40e_status_code status = I40E_SUCCESS;
7749
7750         if (pf->support_multi_driver) {
7751                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7752                 return I40E_NOT_SUPPORTED;
7753         }
7754
7755         /* for GTP-C */
7756         memset(&filter_replace, 0,
7757                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7758         memset(&filter_replace_buf, 0,
7759                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7760         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7761         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7762         filter_replace.new_filter_type =
7763                 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7764         /* Prepare the buffer, 2 entries */
7765         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7766         filter_replace_buf.data[0] |=
7767                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7768         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7769         filter_replace_buf.data[4] |=
7770                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7771         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7772                                                &filter_replace_buf);
7773         if (status < 0)
7774                 return status;
7775         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7776                     "cloud filter type is changed from 0x%x to 0x%x",
7777                     filter_replace.old_filter_type,
7778                     filter_replace.new_filter_type);
7779
7780         /* for GTP-U */
7781         memset(&filter_replace, 0,
7782                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7783         memset(&filter_replace_buf, 0,
7784                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7785         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7786         filter_replace.old_filter_type =
7787                 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7788         filter_replace.new_filter_type =
7789                 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7790         /* Prepare the buffer, 2 entries */
7791         filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7792         filter_replace_buf.data[0] |=
7793                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7794         filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7795         filter_replace_buf.data[4] |=
7796                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7797
7798         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7799                                                &filter_replace_buf);
7800         if (!status) {
7801                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
7802                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
7803                             "cloud filter type is changed from 0x%x to 0x%x",
7804                             filter_replace.old_filter_type,
7805                             filter_replace.new_filter_type);
7806         }
7807         return status;
7808 }
7809
7810 int
7811 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7812                       struct i40e_tunnel_filter_conf *tunnel_filter,
7813                       uint8_t add)
7814 {
7815         uint16_t ip_type;
7816         uint32_t ipv4_addr, ipv4_addr_le;
7817         uint8_t i, tun_type = 0;
7818         /* internal variable to convert ipv6 byte order */
7819         uint32_t convert_ipv6[4];
7820         int val, ret = 0;
7821         struct i40e_pf_vf *vf = NULL;
7822         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7823         struct i40e_vsi *vsi;
7824         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7825         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7826         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7827         struct i40e_tunnel_filter *tunnel, *node;
7828         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7829         uint32_t teid_le;
7830         bool big_buffer = 0;
7831
7832         cld_filter = rte_zmalloc("tunnel_filter",
7833                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7834                          0);
7835
7836         if (cld_filter == NULL) {
7837                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7838                 return -ENOMEM;
7839         }
7840         pfilter = cld_filter;
7841
7842         ether_addr_copy(&tunnel_filter->outer_mac,
7843                         (struct ether_addr *)&pfilter->element.outer_mac);
7844         ether_addr_copy(&tunnel_filter->inner_mac,
7845                         (struct ether_addr *)&pfilter->element.inner_mac);
7846
7847         pfilter->element.inner_vlan =
7848                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7849         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7850                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7851                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7852                 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7853                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7854                                 &ipv4_addr_le,
7855                                 sizeof(pfilter->element.ipaddr.v4.data));
7856         } else {
7857                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7858                 for (i = 0; i < 4; i++) {
7859                         convert_ipv6[i] =
7860                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7861                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7862                 }
7863                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7864                            &convert_ipv6,
7865                            sizeof(pfilter->element.ipaddr.v6.data));
7866         }
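        /*
         * Note (inferred, not from the original comments):
         * rte_cpu_to_le_32(rte_be_to_cpu_32(x)) converts the network-order
         * (big-endian) IPv6 words to the little-endian layout the admin
         * queue expects, regardless of host endianness.
         */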
7867
7868         /* Check the tunnel type */
7869         switch (tunnel_filter->tunnel_type) {
7870         case I40E_TUNNEL_TYPE_VXLAN:
7871                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7872                 break;
7873         case I40E_TUNNEL_TYPE_NVGRE:
7874                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7875                 break;
7876         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7877                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7878                 break;
7879         case I40E_TUNNEL_TYPE_MPLSoUDP:
7880                 if (!pf->mpls_replace_flag) {
7881                         i40e_replace_mpls_l1_filter(pf);
7882                         i40e_replace_mpls_cloud_filter(pf);
7883                         pf->mpls_replace_flag = 1;
7884                 }
7885                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7886                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7887                         teid_le >> 4;
7888                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7889                         (teid_le & 0xF) << 12;
7890                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7891                         0x40;
7892                 big_buffer = 1;
7893                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
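                /*
                 * Worked example (illustrative, not from the original
                 * comments): for a 20-bit MPLS label 0x12345,
                 * WORD0 = teid_le >> 4 = 0x1234 and
                 * WORD1 = (teid_le & 0xF) << 12 = 0x5000, so the label
                 * occupies the top 20 bits of the WORD0/WORD1 pair. WORD2
                 * is 0x40 here versus 0x0 in the MPLSoGRE case below,
                 * which appears to distinguish the encapsulation.
                 */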
7894                 break;
7895         case I40E_TUNNEL_TYPE_MPLSoGRE:
7896                 if (!pf->mpls_replace_flag) {
7897                         i40e_replace_mpls_l1_filter(pf);
7898                         i40e_replace_mpls_cloud_filter(pf);
7899                         pf->mpls_replace_flag = 1;
7900                 }
7901                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7902                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7903                         teid_le >> 4;
7904                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7905                         (teid_le & 0xF) << 12;
7906                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7907                         0x0;
7908                 big_buffer = 1;
7909                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7910                 break;
7911         case I40E_TUNNEL_TYPE_GTPC:
7912                 if (!pf->gtp_replace_flag) {
7913                         i40e_replace_gtp_l1_filter(pf);
7914                         i40e_replace_gtp_cloud_filter(pf);
7915                         pf->gtp_replace_flag = 1;
7916                 }
7917                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7918                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
7919                         (teid_le >> 16) & 0xFFFF;
7920                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
7921                         teid_le & 0xFFFF;
7922                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
7923                         0x0;
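                /*
                 * Worked example (illustrative): a 32-bit TEID of
                 * 0x12345678 is split into WORD0 = 0x1234 (upper 16 bits)
                 * and WORD1 = 0x5678 (lower 16 bits); the GTPU case below
                 * packs its TEID the same way.
                 */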
7924                 big_buffer = 1;
7925                 break;
7926         case I40E_TUNNEL_TYPE_GTPU:
7927                 if (!pf->gtp_replace_flag) {
7928                         i40e_replace_gtp_l1_filter(pf);
7929                         i40e_replace_gtp_cloud_filter(pf);
7930                         pf->gtp_replace_flag = 1;
7931                 }
7932                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7933                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
7934                         (teid_le >> 16) & 0xFFFF;
7935                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
7936                         teid_le & 0xFFFF;
7937                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
7938                         0x0;
7939                 big_buffer = 1;
7940                 break;
7941         case I40E_TUNNEL_TYPE_QINQ:
7942                 if (!pf->qinq_replace_flag) {
7943                         ret = i40e_cloud_filter_qinq_create(pf);
7944                         if (ret < 0)
7945                                 PMD_DRV_LOG(DEBUG,
7946                                             "QinQ tunnel filter already created.");
7947                         pf->qinq_replace_flag = 1;
7948                 }
7949                 /* Store the outer and inner VLAN values in the general
7950                  * fields of the cloud filter. The big buffer path must
7951                  * be used for this filter type; see
7952                  * i40e_aq_add_cloud_filters.
7953                  */
7954                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7955                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7956                 big_buffer = 1;
7957                 break;
7958         default:
7959                 /* Other tunnel types are not supported. */
7960                 PMD_DRV_LOG(ERR, "Tunnel type is not supported.");
7961                 rte_free(cld_filter);
7962                 return -EINVAL;
7963         }
7964
7965         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7966                 pfilter->element.flags =
7967                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7968         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7969                 pfilter->element.flags =
7970                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7971         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
7972                 pfilter->element.flags =
7973                         I40E_AQC_ADD_CLOUD_FILTER_0X11;
7974         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
7975                 pfilter->element.flags =
7976                         I40E_AQC_ADD_CLOUD_FILTER_0X12;
7977         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7978                 pfilter->element.flags |=
7979                         I40E_AQC_ADD_CLOUD_FILTER_0X10;
7980         else {
7981                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7982                                                 &pfilter->element.flags);
7983                 if (val < 0) {
7984                         rte_free(cld_filter);
7985                         return -EINVAL;
7986                 }
7987         }
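        /*
         * Note (inferred): the 0X11/0X12/0X10 filter types selected above
         * are the custom cloud filter types installed by the replace-filter
         * helpers earlier in this file, not standard AQ filter types.
         */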
7988
7989         pfilter->element.flags |= rte_cpu_to_le_16(
7990                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7991                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7992         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7993         pfilter->element.queue_number =
7994                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7995
7996         if (!tunnel_filter->is_to_vf)
7997                 vsi = pf->main_vsi;
7998         else {
7999                 if (tunnel_filter->vf_id >= pf->vf_num) {
8000                         PMD_DRV_LOG(ERR, "Invalid argument.");
8001                         rte_free(cld_filter);
8002                         return -EINVAL;
8003                 }
8004                 vf = &pf->vfs[tunnel_filter->vf_id];
8005                 vsi = vf->vsi;
8006         }
8007
8008         /* Check if the filter already exists in the SW list */
8009         memset(&check_filter, 0, sizeof(check_filter));
8010         i40e_tunnel_filter_convert(cld_filter, &check_filter);
8011         check_filter.is_to_vf = tunnel_filter->is_to_vf;
8012         check_filter.vf_id = tunnel_filter->vf_id;
8013         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8014         if (add && node) {
8015                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8016                 rte_free(cld_filter);
8017                 return -EINVAL;
8018         }
8019
8020         if (!add && !node) {
8021                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8022                 rte_free(cld_filter);
8023                 return -EINVAL;
8024         }
8025
8026         if (add) {
8027                 if (big_buffer)
8028                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
8029                                                    vsi->seid, cld_filter, 1);
8030                 else
8031                         ret = i40e_aq_add_cloud_filters(hw,
8032                                         vsi->seid, &cld_filter->element, 1);
8033                 if (ret < 0) {
8034                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8035                         rte_free(cld_filter);
8036                         return -ENOTSUP;
8037                 }
8038                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8039                 if (tunnel == NULL) {
8040                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8041                         rte_free(cld_filter);
8042                         return -ENOMEM;
8043                 }
8044
8045                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8046                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8047                 if (ret < 0)
8048                         rte_free(tunnel);
8049         } else {
8050                 if (big_buffer)
8051                         ret = i40e_aq_remove_cloud_filters_big_buffer(
8052                                 hw, vsi->seid, cld_filter, 1);
8053                 else
8054                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
8055                                                    &cld_filter->element, 1);
8056                 if (ret < 0) {
8057                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8058                         rte_free(cld_filter);
8059                         return -ENOTSUP;
8060                 }
8061                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8062         }
8063
8064         rte_free(cld_filter);
8065         return ret;
8066 }
8067
8068 static int
8069 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8070 {
8071         uint8_t i;
8072
8073         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8074                 if (pf->vxlan_ports[i] == port)
8075                         return i;
8076         }
8077
8078         return -1;
8079 }
8080
8081 static int
8082 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
8083 {
8084         int  idx, ret;
8085         uint8_t filter_idx;
8086         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8087
8088         idx = i40e_get_vxlan_port_idx(pf, port);
8089
8090         /* Check if port already exists */
8091         if (idx >= 0) {
8092                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8093                 return -EINVAL;
8094         }
8095
8096         /* Now check if there is space to add the new port */
8097         idx = i40e_get_vxlan_port_idx(pf, 0);
8098         if (idx < 0) {
8099                 PMD_DRV_LOG(ERR,
8100                         "Maximum number of UDP ports reached, not adding port %d",
8101                         port);
8102                 return -ENOSPC;
8103         }
8104
8105         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
8106                                         &filter_idx, NULL);
8107         if (ret < 0) {
8108                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8109                 return -1;
8110         }
8111
8112         PMD_DRV_LOG(INFO, "Added UDP port %d via AQ command, filter index %d",
8113                     port, filter_idx);
8114
8115         /* New port: add it and mark its index in the bitmap */
8116         pf->vxlan_ports[idx] = port;
8117         pf->vxlan_bitmap |= (1 << idx);
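        /*
         * Illustrative note: each bit of vxlan_bitmap tracks one
         * vxlan_ports[] slot, e.g. with slots 0 and 2 occupied the bitmap
         * reads 0x5.
         */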
8118
8119         if (!(pf->flags & I40E_FLAG_VXLAN))
8120                 pf->flags |= I40E_FLAG_VXLAN;
8121
8122         return 0;
8123 }
8124
8125 static int
8126 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8127 {
8128         int idx;
8129         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8130
8131         if (!(pf->flags & I40E_FLAG_VXLAN)) {
8132                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8133                 return -EINVAL;
8134         }
8135
8136         idx = i40e_get_vxlan_port_idx(pf, port);
8137
8138         if (idx < 0) {
8139                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8140                 return -EINVAL;
8141         }
8142
8143         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8144                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8145                 return -1;
8146         }
8147
8148         PMD_DRV_LOG(INFO, "Deleted UDP port %d via AQ command, filter index %d",
8149                     port, idx);
8150
8151         pf->vxlan_ports[idx] = 0;
8152         pf->vxlan_bitmap &= ~(1 << idx);
8153
8154         if (!pf->vxlan_bitmap)
8155                 pf->flags &= ~I40E_FLAG_VXLAN;
8156
8157         return 0;
8158 }
8159
8160 /* Add UDP tunneling port */
8161 static int
8162 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8163                              struct rte_eth_udp_tunnel *udp_tunnel)
8164 {
8165         int ret = 0;
8166         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8167
8168         if (udp_tunnel == NULL)
8169                 return -EINVAL;
8170
8171         switch (udp_tunnel->prot_type) {
8172         case RTE_TUNNEL_TYPE_VXLAN:
8173                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8174                 break;
8175
8176         case RTE_TUNNEL_TYPE_GENEVE:
8177         case RTE_TUNNEL_TYPE_TEREDO:
8178                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8179                 ret = -1;
8180                 break;
8181
8182         default:
8183                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8184                 ret = -1;
8185                 break;
8186         }
8187
8188         return ret;
8189 }
8190
8191 /* Remove UDP tunneling port */
8192 static int
8193 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8194                              struct rte_eth_udp_tunnel *udp_tunnel)
8195 {
8196         int ret = 0;
8197         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8198
8199         if (udp_tunnel == NULL)
8200                 return -EINVAL;
8201
8202         switch (udp_tunnel->prot_type) {
8203         case RTE_TUNNEL_TYPE_VXLAN:
8204                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8205                 break;
8206         case RTE_TUNNEL_TYPE_GENEVE:
8207         case RTE_TUNNEL_TYPE_TEREDO:
8208                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
8209                 ret = -1;
8210                 break;
8211         default:
8212                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8213                 ret = -1;
8214                 break;
8215         }
8216
8217         return ret;
8218 }
8219
8220 /* Calculate the maximum number of contiguous PF queues that are configured */
8221 static int
8222 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8223 {
8224         struct rte_eth_dev_data *data = pf->dev_data;
8225         int i, num;
8226         struct i40e_rx_queue *rxq;
8227
8228         num = 0;
8229         for (i = 0; i < pf->lan_nb_qps; i++) {
8230                 rxq = data->rx_queues[i];
8231                 if (rxq && rxq->q_set)
8232                         num++;
8233                 else
8234                         break;
8235         }
8236
8237         return num;
8238 }
8239
8240 /* Configure RSS */
8241 static int
8242 i40e_pf_config_rss(struct i40e_pf *pf)
8243 {
8244         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8245         struct rte_eth_rss_conf rss_conf;
8246         uint32_t i, lut = 0;
8247         uint16_t j, num;
8248
8249         /*
8250          * If both VMDQ and RSS are enabled, not all of the PF queues are
8251          * configured. Calculate the number of PF queues actually configured.
8252          */
8253         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8254                 num = i40e_pf_calc_configured_queues_num(pf);
8255         else
8256                 num = pf->dev_data->nb_rx_queues;
8257
8258         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8259         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
8260                         num);
8261
8262         if (num == 0) {
8263                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8264                 return -ENOTSUP;
8265         }
8266
8267         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8268                 if (j == num)
8269                         j = 0;
8270                 lut = (lut << 8) | (j & ((0x1 <<
8271                         hw->func_caps.rss_table_entry_width) - 1));
8272                 if ((i & 3) == 3)
8273                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8274         }
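        /*
         * Worked example (illustrative assumption): LUT entries are packed
         * MSB-first, four per 32-bit register, so with num = 4 the first
         * write is I40E_WRITE_REG(hw, I40E_PFQF_HLUT(0), 0x00010203),
         * mapping LUT entries 0..3 to queues 0, 1, 2 and 3.
         */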
8275
8276         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8277         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8278                 i40e_pf_disable_rss(pf);
8279                 return 0;
8280         }
8281         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8282                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8283                 /* Default RSS hash key (randomly generated) */
8284                 static uint32_t rss_key_default[] = {0x6b793944,
8285                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8286                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8287                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8288
8289                 rss_conf.rss_key = (uint8_t *)rss_key_default;
8290                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8291                                                         sizeof(uint32_t);
8292         }
8293
8294         return i40e_hw_rss_hash_set(pf, &rss_conf);
8295 }
8296
8297 static int
8298 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8299                                struct rte_eth_tunnel_filter_conf *filter)
8300 {
8301         if (pf == NULL || filter == NULL) {
8302                 PMD_DRV_LOG(ERR, "Invalid parameter");
8303                 return -EINVAL;
8304         }
8305
8306         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8307                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8308                 return -EINVAL;
8309         }
8310
8311         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8312                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8313                 return -EINVAL;
8314         }
8315
8316         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8317                 (is_zero_ether_addr(&filter->outer_mac))) {
8318                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8319                 return -EINVAL;
8320         }
8321
8322         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8323                 (is_zero_ether_addr(&filter->inner_mac))) {
8324                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8325                 return -EINVAL;
8326         }
8327
8328         return 0;
8329 }
8330
8331 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8332 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8333 static int
8334 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8335 {
8336         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8337         uint32_t val, reg;
8338         int ret = -EINVAL;
8339
8340         if (pf->support_multi_driver) {
8341                 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8342                 return -ENOTSUP;
8343         }
8344
8345         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8346         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8347
8348         if (len == 3) {
8349                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8350         } else if (len == 4) {
8351                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8352         } else {
8353                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8354                 return ret;
8355         }
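        /*
         * Inference from the logic above (not a documented register
         * description): the MSK_ENA bit of GL_PRS_FVBM(2) selects a 3-byte
         * (24-bit) GRE key when set and the full 4-byte (32-bit) key when
         * clear.
         */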
8356
8357         if (reg != val) {
8358                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
8359                                                    reg, NULL);
8360                 if (ret != 0)
8361                         return ret;
8362                 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8363                             "to value 0x%08x",
8364                             I40E_GL_PRS_FVBM(2), reg);
8365                 i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
8366         } else {
8367                 ret = 0;
8368         }
8369         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8370                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8371
8372         return ret;
8373 }
8374
8375 static int
8376 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8377 {
8378         int ret = -EINVAL;
8379
8380         if (!hw || !cfg)
8381                 return -EINVAL;
8382
8383         switch (cfg->cfg_type) {
8384         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8385                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8386                 break;
8387         default:
8388                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8389                 break;
8390         }
8391
8392         return ret;
8393 }
8394
8395 static int
8396 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8397                                enum rte_filter_op filter_op,
8398                                void *arg)
8399 {
8400         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8401         int ret = I40E_ERR_PARAM;
8402
8403         switch (filter_op) {
8404         case RTE_ETH_FILTER_SET:
8405                 ret = i40e_dev_global_config_set(hw,
8406                         (struct rte_eth_global_cfg *)arg);
8407                 break;
8408         default:
8409                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8410                 break;
8411         }
8412
8413         return ret;
8414 }
8415
8416 static int
8417 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8418                           enum rte_filter_op filter_op,
8419                           void *arg)
8420 {
8421         struct rte_eth_tunnel_filter_conf *filter;
8422         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8423         int ret = I40E_SUCCESS;
8424
8425         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8426
8427         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8428                 return I40E_ERR_PARAM;
8429
8430         switch (filter_op) {
8431         case RTE_ETH_FILTER_NOP:
8432                 if (!(pf->flags & I40E_FLAG_VXLAN))
8433                         ret = I40E_NOT_SUPPORTED;
8434                 break;
8435         case RTE_ETH_FILTER_ADD:
8436                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8437                 break;
8438         case RTE_ETH_FILTER_DELETE:
8439                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8440                 break;
8441         default:
8442                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8443                 ret = I40E_ERR_PARAM;
8444                 break;
8445         }
8446
8447         return ret;
8448 }
8449
8450 static int
8451 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8452 {
8453         int ret = 0;
8454         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8455
8456         /* RSS setup */
8457         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8458                 ret = i40e_pf_config_rss(pf);
8459         else
8460                 i40e_pf_disable_rss(pf);
8461
8462         return ret;
8463 }
8464
8465 /* Get the symmetric hash enable configurations per port */
8466 static void
8467 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8468 {
8469         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8470
8471         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8472 }
8473
8474 /* Set the symmetric hash enable configurations per port */
8475 static void
8476 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8477 {
8478         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8479
8480         if (enable > 0) {
8481                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8482                         PMD_DRV_LOG(INFO,
8483                                 "Symmetric hash has already been enabled");
8484                         return;
8485                 }
8486                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8487         } else {
8488                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8489                         PMD_DRV_LOG(INFO,
8490                                 "Symmetric hash has already been disabled");
8491                         return;
8492                 }
8493                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8494         }
8495         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8496         I40E_WRITE_FLUSH(hw);
8497 }
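/*
 * Background note (general RSS behaviour, not i40e-specific): with symmetric
 * hashing enabled, a flow and its reverse direction (source and destination
 * swapped) hash to the same value, which keeps both directions of a
 * connection on the same queue.
 */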
8498
8499 /*
8500  * Get global configurations of hash function type and symmetric hash enable
8501  * per flow type (pctype). Note that a global configuration affects all the
8502  * ports on the same NIC.
8503  */
8504 static int
8505 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8506                                    struct rte_eth_hash_global_conf *g_cfg)
8507 {
8508         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8509         uint32_t reg;
8510         uint16_t i, j;
8511
8512         memset(g_cfg, 0, sizeof(*g_cfg));
8513         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8514         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8515                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8516         else
8517                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8518         PMD_DRV_LOG(DEBUG, "Hash function is %s",
8519                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8520
8521         /*
8522          * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8523          * be checked.
8524          */
8525         for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8526                 g_cfg->valid_bit_mask[i] = 0ULL;
8527                 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8528         }
8529
8530         g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8531
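        /*
         * Each RTE flow type maps to one or more HW pctypes via
         * adapter->pctypes_tbl; a flow type is reported as symmetric when
         * any of its pctypes has the SYMH_ENA bit set.
         */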
8532         for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8533                 if (!adapter->pctypes_tbl[i])
8534                         continue;
8535                 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8536                      j < I40E_FILTER_PCTYPE_MAX; j++) {
8537                         if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8538                                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8539                                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8540                                         g_cfg->sym_hash_enable_mask[0] |=
8541                                                                 (1ULL << i);
8542                                 }
8543                         }
8544                 }
8545         }
8546
8547         return 0;
8548 }
8549
8550 static int
8551 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8552                               const struct rte_eth_hash_global_conf *g_cfg)
8553 {
8554         uint32_t i;
8555         uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8556
8557         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8558                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8559                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8560                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8561                                                 g_cfg->hash_func);
8562                 return -EINVAL;
8563         }
8564
8565         /*
8566          * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8567          * be checked.
8568          */
8569         mask0 = g_cfg->valid_bit_mask[0];
8570         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8571                 if (i == 0) {
8572                         /* Check if any unsupported flow type is configured */
8573                         if ((mask0 | i40e_mask) ^ i40e_mask)
8574                                 goto mask_err;
8575                 } else {
8576                         if (g_cfg->valid_bit_mask[i])
8577                                 goto mask_err;
8578                 }
8579         }
8580
8581         return 0;
8582
8583 mask_err:
8584         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8585
8586         return -EINVAL;
8587 }
8588
8589 /*
8590  * Set global configurations of hash function type and symmetric hash enable
8591  * per flow type (pctype). Note that modifying a global configuration will
8592  * affect all the ports on the same NIC.
8593  */
8594 static int
8595 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8596                                    struct rte_eth_hash_global_conf *g_cfg)
8597 {
8598         struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8599         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8600         int ret;
8601         uint16_t i, j;
8602         uint32_t reg;
8603         uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8604
8605         if (pf->support_multi_driver) {
8606                 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8607                 return -ENOTSUP;
8608         }
8609
8610         /* Check the input parameters */
8611         ret = i40e_hash_global_config_check(adapter, g_cfg);
8612         if (ret < 0)
8613                 return ret;
8614
8615         /*
8616          * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8617          * be configured.
8618          */
8619         for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8620                 if (mask0 & (1UL << i)) {
8621                         reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8622                                         I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8623
8624                         for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8625                              j < I40E_FILTER_PCTYPE_MAX; j++) {
8626                                 if (adapter->pctypes_tbl[i] & (1ULL << j))
8627                                         i40e_write_global_rx_ctl(hw,
8628                                                           I40E_GLQF_HSYM(j),
8629                                                           reg);
8630                         }
8631                         i40e_global_cfg_warning(I40E_WARNING_HSYM);
8632                 }
8633         }
8634
8635         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8636         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8637                 /* Toeplitz */
8638                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8639                         PMD_DRV_LOG(DEBUG,
8640                                 "Hash function already set to Toeplitz");
8641                         goto out;
8642                 }
8643                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8644         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8645                 /* Simple XOR */
8646                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8647                         PMD_DRV_LOG(DEBUG,
8648                                 "Hash function already set to Simple XOR");
8649                         goto out;
8650                 }
8651                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8652         } else
8653                 /* Use the default, and keep it as it is */
8654                 goto out;
8655
8656         i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8657         i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
8658
8659 out:
8660         I40E_WRITE_FLUSH(hw);
8661
8662         return 0;
8663 }
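/*
 * Hedged usage sketch (assumption: an application reaches this path via the
 * legacy rte_eth_dev_filter_ctrl() API; none of these calls appear in this
 * file):
 *
 *     struct rte_eth_hash_filter_info info = {
 *             .info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG,
 *     };
 *
 *     info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
 *     info.info.global_conf.valid_bit_mask[0] =
 *             1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     info.info.global_conf.sym_hash_enable_mask[0] =
 *             1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                             RTE_ETH_FILTER_SET, &info);
 */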
8664
8665 /**
8666  * Valid input sets for hash and flow director filters per PCTYPE
8667  */
8668 static uint64_t
8669 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8670                 enum rte_filter_type filter)
8671 {
8672         uint64_t valid;
8673
8674         static const uint64_t valid_hash_inset_table[] = {
8675                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8676                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8677                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8678                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8679                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8680                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8681                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8682                         I40E_INSET_FLEX_PAYLOAD,
8683                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8684                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8685                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8686                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8687                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8688                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8689                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8690                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8691                         I40E_INSET_FLEX_PAYLOAD,
8692                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8693                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8694                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8695                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8696                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8697                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8698                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8699                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8700                         I40E_INSET_FLEX_PAYLOAD,
8701                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8702                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8703                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8704                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8705                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8706                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8707                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8708                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8709                         I40E_INSET_FLEX_PAYLOAD,
8710                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8711                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8712                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8713                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8714                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8715                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8716                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8717                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8718                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8719                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8720                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8721                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8722                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8723                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8724                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8725                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8726                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8727                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8728                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8729                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8730                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8731                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8732                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8733                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8734                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8735                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8736                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8737                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8738                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8739                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8740                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8741                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8742                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8743                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8744                         I40E_INSET_FLEX_PAYLOAD,
8745                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8746                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8747                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8748                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8749                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8750                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8751                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8752                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8753                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8754                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8755                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8756                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8757                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8758                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8759                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8760                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8761                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8762                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8763                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8764                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8765                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8766                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8767                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8768                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8769                         I40E_INSET_FLEX_PAYLOAD,
8770                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8771                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8772                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8773                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8774                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8775                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8776                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8777                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8778                         I40E_INSET_FLEX_PAYLOAD,
8779                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8780                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8781                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8782                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8783                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8784                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8785                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8786                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8787                         I40E_INSET_FLEX_PAYLOAD,
8788                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8789                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8790                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8791                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8792                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8793                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8794                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8795                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8796                         I40E_INSET_FLEX_PAYLOAD,
8797                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8798                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8799                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8800                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8801                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8802                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8803                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8804                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8805                         I40E_INSET_FLEX_PAYLOAD,
8806                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8807                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8808                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8809                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8810                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8811                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8812                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8813                         I40E_INSET_FLEX_PAYLOAD,
8814                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8815                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8816                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8817                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8818                         I40E_INSET_FLEX_PAYLOAD,
8819         };
8820
8821         /**
8822          * Flow director supports only fields defined in
8823          * union rte_eth_fdir_flow.
8824          */
8825         static const uint64_t valid_fdir_inset_table[] = {
8826                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8827                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8828                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8829                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8830                 I40E_INSET_IPV4_TTL,
8831                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8832                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8833                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8834                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8835                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8836                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8837                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8838                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8839                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8840                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8841                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8842                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8843                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8844                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8845                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8846                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8847                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8848                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8849                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8850                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8851                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8852                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8853                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8854                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8855                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8856                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8857                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8858                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8859                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8860                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8861                 I40E_INSET_SCTP_VT,
8862                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8863                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8864                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8865                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8866                 I40E_INSET_IPV4_TTL,
8867                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8868                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8869                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8870                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8871                 I40E_INSET_IPV6_HOP_LIMIT,
8872                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8873                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8874                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8875                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8876                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8877                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8878                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8879                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8880                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8881                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8882                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8883                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8884                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8885                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8886                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8887                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8888                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8889                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8890                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8891                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8892                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8893                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8894                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8895                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8896                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8897                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8898                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8899                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8900                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8901                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8902                 I40E_INSET_SCTP_VT,
8903                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8904                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8905                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8906                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8907                 I40E_INSET_IPV6_HOP_LIMIT,
8908                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8909                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8910                 I40E_INSET_LAST_ETHER_TYPE,
8911         };
8912
8913         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8914                 return 0;
8915         if (filter == RTE_ETH_FILTER_HASH)
8916                 valid = valid_hash_inset_table[pctype];
8917         else
8918                 valid = valid_fdir_inset_table[pctype];
8919
8920         return valid;
8921 }
8922
8923 /**
8924  * Validate whether the input set is allowed for a specific PCTYPE
8925  */
8926 int
8927 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8928                 enum rte_filter_type filter, uint64_t inset)
8929 {
8930         uint64_t valid;
8931
8932         valid = i40e_get_valid_input_set(pctype, filter);
8933         if (inset & (~valid))
8934                 return -EINVAL;
8935
8936         return 0;
8937 }
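/*
 * Illustrative example: for a flow director filter on
 * I40E_FILTER_PCTYPE_NONF_IPV4_UDP, an inset containing I40E_INSET_TUNNEL_ID
 * is rejected, because the fdir table above does not list that field for
 * this pctype.
 */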
8938
8939 /* Default input set field combination per pctype */
8940 uint64_t
8941 i40e_get_default_input_set(uint16_t pctype)
8942 {
8943         static const uint64_t default_inset_table[] = {
8944                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8945                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8946                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8947                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8948                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8949                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8950                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8951                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8952                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8953                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8954                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8955                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8956                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8957                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8958                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8959                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8960                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8961                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8962                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8963                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8964                         I40E_INSET_SCTP_VT,
8965                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8966                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8967                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8968                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8969                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8970                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8971                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8972                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8973                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8974                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8975                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8976                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8977                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8978                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8979                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8980                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8981                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8982                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8983                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8984                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8985                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8986                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8987                         I40E_INSET_SCTP_VT,
8988                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8989                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8990                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8991                         I40E_INSET_LAST_ETHER_TYPE,
8992         };
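        /*
         * Illustrative example: for I40E_FILTER_PCTYPE_NONF_IPV4_UDP the
         * default input set above is the classic 4-tuple, i.e. source and
         * destination IPv4 address plus source and destination UDP port.
         */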
8993
8994         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8995                 return 0;
8996
8997         return default_inset_table[pctype];
8998 }
8999
9000 /**
9001  * Parse the input set from field indexes to logical bit masks
9002  */
9003 static int
9004 i40e_parse_input_set(uint64_t *inset,
9005                      enum i40e_filter_pctype pctype,
9006                      enum rte_eth_input_set_field *field,
9007                      uint16_t size)
9008 {
9009         uint16_t i, j;
9010         int ret = -EINVAL;
9011
9012         static const struct {
9013                 enum rte_eth_input_set_field field;
9014                 uint64_t inset;
9015         } inset_convert_table[] = {
9016                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9017                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9018                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9019                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9020                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9021                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9022                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9023                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9024                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9025                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9026                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9027                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9028                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9029                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9030                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9031                         I40E_INSET_IPV6_NEXT_HDR},
9032                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9033                         I40E_INSET_IPV6_HOP_LIMIT},
9034                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9035                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9036                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9037                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9038                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9039                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9040                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9041                         I40E_INSET_SCTP_VT},
9042                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9043                         I40E_INSET_TUNNEL_DMAC},
9044                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9045                         I40E_INSET_VLAN_TUNNEL},
9046                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9047                         I40E_INSET_TUNNEL_ID},
9048                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9049                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9050                         I40E_INSET_FLEX_PAYLOAD_W1},
9051                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9052                         I40E_INSET_FLEX_PAYLOAD_W2},
9053                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9054                         I40E_INSET_FLEX_PAYLOAD_W3},
9055                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9056                         I40E_INSET_FLEX_PAYLOAD_W4},
9057                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9058                         I40E_INSET_FLEX_PAYLOAD_W5},
9059                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9060                         I40E_INSET_FLEX_PAYLOAD_W6},
9061                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9062                         I40E_INSET_FLEX_PAYLOAD_W7},
9063                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9064                         I40E_INSET_FLEX_PAYLOAD_W8},
9065         };
9066
9067         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9068                 return ret;
9069
9070         /* Only one item allowed for default or all */
9071         if (size == 1) {
9072                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9073                         *inset = i40e_get_default_input_set(pctype);
9074                         return 0;
9075                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9076                         *inset = I40E_INSET_NONE;
9077                         return 0;
9078                 }
9079         }
9080
9081         for (i = 0, *inset = 0; i < size; i++) {
9082                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9083                         if (field[i] == inset_convert_table[j].field) {
9084                                 *inset |= inset_convert_table[j].inset;
9085                                 break;
9086                         }
9087                 }
9088
9089                 /* It contains unsupported input set, return immediately */
9090                 if (j == RTE_DIM(inset_convert_table))
9091                         return ret;
9092         }
9093
9094         return 0;
9095 }
9096
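/*
 * Editor's note: an illustrative sketch, not part of the upstream driver.
 * It shows how a caller might build a field array for
 * i40e_parse_input_set(); the function name and the chosen fields are
 * arbitrary example values.
 */
static int
example_parse_ipv4_udp_inset(uint64_t *inset)
{
        enum rte_eth_input_set_field fields[] = {
                RTE_ETH_INPUT_SET_L3_SRC_IP4,
                RTE_ETH_INPUT_SET_L3_DST_IP4,
                RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT,
                RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
        };

        /* On success, *inset holds I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST
         * | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT.
         */
        return i40e_parse_input_set(inset, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
                                    fields, RTE_DIM(fields));
}
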
9097 /**
9098  * Translate the input set from logical bit masks to register-aware
9099  * bit masks
9100  */
9101 uint64_t
9102 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9103 {
9104         uint64_t val = 0;
9105         uint16_t i;
9106
9107         struct inset_map {
9108                 uint64_t inset;
9109                 uint64_t inset_reg;
9110         };
9111
9112         static const struct inset_map inset_map_common[] = {
9113                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9114                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9115                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9116                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9117                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9118                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9119                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9120                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9121                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9122                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9123                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9124                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9125                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9126                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9127                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9128                 {I40E_INSET_TUNNEL_DMAC,
9129                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9130                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9131                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9132                 {I40E_INSET_TUNNEL_SRC_PORT,
9133                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9134                 {I40E_INSET_TUNNEL_DST_PORT,
9135                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9136                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9137                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9138                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9139                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9140                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9141                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9142                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9143                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9144                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9145         };
9146
9147         /* Some registers are mapped differently on the X722 */
9148         static const struct inset_map inset_map_diff_x722[] = {
9149                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9150                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9151                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9152                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9153         };
9154
9155         static const struct inset_map inset_map_diff_not_x722[] = {
9156                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9157                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9158                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9159                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9160         };
9161
9162         if (input == 0)
9163                 return val;
9164
9165         /* Translate input set to register aware inset */
9166         if (type == I40E_MAC_X722) {
9167                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9168                         if (input & inset_map_diff_x722[i].inset)
9169                                 val |= inset_map_diff_x722[i].inset_reg;
9170                 }
9171         } else {
9172                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9173                         if (input & inset_map_diff_not_x722[i].inset)
9174                                 val |= inset_map_diff_not_x722[i].inset_reg;
9175                 }
9176         }
9177
9178         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9179                 if (input & inset_map_common[i].inset)
9180                         val |= inset_map_common[i].inset_reg;
9181         }
9182
9183         return val;
9184 }
9185
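/*
 * Editor's note: an illustrative sketch, not part of the upstream driver.
 * It shows the usual pattern around i40e_translate_input_set_reg(): the
 * 64-bit register-aware value is split into the two 32-bit halves that the
 * INSET registers take. The helper name is hypothetical.
 */
static void
example_translate_and_split(struct i40e_hw *hw, uint64_t input_set,
                            uint32_t *low, uint32_t *high)
{
        uint64_t inset_reg = i40e_translate_input_set_reg(hw->mac.type,
                                                          input_set);

        *low = (uint32_t)(inset_reg & UINT32_MAX);
        *high = (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX);
}
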
9186 int
9187 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9188 {
9189         uint8_t i, idx = 0;
9190         uint64_t inset_need_mask = inset;
9191
9192         static const struct {
9193                 uint64_t inset;
9194                 uint32_t mask;
9195         } inset_mask_map[] = {
9196                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9197                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9198                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9199                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9200                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9201                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9202                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9203                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9204         };
9205
9206         if (!inset || !mask || !nb_elem)
9207                 return 0;
9208
9209         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9210                 /* Clear the inset bit if no mask is required,
9211                  * for example proto + ttl.
9212                  */
9213                 if ((inset & inset_mask_map[i].inset) ==
9214                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9215                         inset_need_mask &= ~inset_mask_map[i].inset;
9216                 if (!inset_need_mask)
9217                         return 0;
9218         }
9219         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9220                 if ((inset_need_mask & inset_mask_map[i].inset) ==
9221                     inset_mask_map[i].inset) {
9222                         if (idx >= nb_elem) {
9223                                 PMD_DRV_LOG(ERR, "exceeded maximal number of bitmasks");
9224                                 return -EINVAL;
9225                         }
9226                         mask[idx] = inset_mask_map[i].mask;
9227                         idx++;
9228                 }
9229         }
9230
9231         return idx;
9232 }
9233
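/*
 * Editor's note: an illustrative sketch, not part of the upstream driver.
 * It shows how callers use i40e_generate_inset_mask_reg() and interpret
 * its return value. The function name is hypothetical.
 */
static int
example_generate_masks(uint64_t input_set)
{
        uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
        int num;

        num = i40e_generate_inset_mask_reg(input_set, mask_reg,
                                           I40E_INSET_MASK_NUM_REG);
        if (num < 0)
                return -EINVAL; /* more masks needed than registers exist */

        /* mask_reg[0..num-1] now hold the values to program into
         * I40E_GLQF_FD_MSK / I40E_GLQF_HASH_MSK; the rest stay zero.
         */
        return num;
}
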
9234 void
9235 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9236 {
9237         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9238
9239         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9240         if (reg != val)
9241                 i40e_write_rx_ctl(hw, addr, val);
9242         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9243                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9244 }
9245
9246 void
9247 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9248 {
9249         uint32_t reg = i40e_read_rx_ctl(hw, addr);
9250
9251         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9252         if (reg != val)
9253                 i40e_write_global_rx_ctl(hw, addr, val);
9254         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9255                     (uint32_t)i40e_read_rx_ctl(hw, addr));
9256 }
9257
9258 static void
9259 i40e_filter_input_set_init(struct i40e_pf *pf)
9260 {
9261         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9262         enum i40e_filter_pctype pctype;
9263         uint64_t input_set, inset_reg;
9264         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9265         int num, i;
9266         uint16_t flow_type;
9267
9268         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9269              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9270                 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9271
9272                 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9273                         continue;
9274
9275                 input_set = i40e_get_default_input_set(pctype);
9276
9277                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9278                                                    I40E_INSET_MASK_NUM_REG);
9279                 if (num < 0)
9280                         return;
9281                 if (pf->support_multi_driver && num > 0) {
9282                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9283                         return;
9284                 }
9285                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9286                                         input_set);
9287
9288                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9289                                       (uint32_t)(inset_reg & UINT32_MAX));
9290                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9291                                      (uint32_t)((inset_reg >>
9292                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
9293                 if (!pf->support_multi_driver) {
9294                         i40e_check_write_global_reg(hw,
9295                                             I40E_GLQF_HASH_INSET(0, pctype),
9296                                             (uint32_t)(inset_reg & UINT32_MAX));
9297                         i40e_check_write_global_reg(hw,
9298                                              I40E_GLQF_HASH_INSET(1, pctype),
9299                                              (uint32_t)((inset_reg >>
9300                                               I40E_32_BIT_WIDTH) & UINT32_MAX));
9301
9302                         for (i = 0; i < num; i++) {
9303                                 i40e_check_write_global_reg(hw,
9304                                                     I40E_GLQF_FD_MSK(i, pctype),
9305                                                     mask_reg[i]);
9306                                 i40e_check_write_global_reg(hw,
9307                                                   I40E_GLQF_HASH_MSK(i, pctype),
9308                                                   mask_reg[i]);
9309                         }
9310                         /* clear unused mask registers of the pctype */
9311                         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9312                                 i40e_check_write_global_reg(hw,
9313                                                     I40E_GLQF_FD_MSK(i, pctype),
9314                                                     0);
9315                                 i40e_check_write_global_reg(hw,
9316                                                   I40E_GLQF_HASH_MSK(i, pctype),
9317                                                   0);
9318                         }
9319                 } else {
9320                         PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9321                 }
9322                 I40E_WRITE_FLUSH(hw);
9323
9324                 /* store the default input set */
9325                 if (!pf->support_multi_driver)
9326                         pf->hash_input_set[pctype] = input_set;
9327                 pf->fdir.input_set[pctype] = input_set;
9328         }
9329
9330         if (!pf->support_multi_driver) {
9331                 i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9332                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9333                 i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9334         }
9335 }
9336
9337 int
9338 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9339                          struct rte_eth_input_set_conf *conf)
9340 {
9341         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9342         enum i40e_filter_pctype pctype;
9343         uint64_t input_set, inset_reg = 0;
9344         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9345         int ret, i, num;
9346
9347         if (!conf) {
9348                 PMD_DRV_LOG(ERR, "Invalid pointer");
9349                 return -EFAULT;
9350         }
9351         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9352             conf->op != RTE_ETH_INPUT_SET_ADD) {
9353                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9354                 return -EINVAL;
9355         }
9356
9357         if (pf->support_multi_driver) {
9358                 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9359                 return -ENOTSUP;
9360         }
9361
9362         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9363         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9364                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9365                 return -EINVAL;
9366         }
9367
9368         if (hw->mac.type == I40E_MAC_X722) {
9369                 /* get translated pctype value in fd pctype register */
9370                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9371                         I40E_GLQF_FD_PCTYPES((int)pctype));
9372         }
9373
9374         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9375                                    conf->inset_size);
9376         if (ret) {
9377                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9378                 return -EINVAL;
9379         }
9380
9381         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9382                 /* get inset value in register */
9383                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9384                 inset_reg <<= I40E_32_BIT_WIDTH;
9385                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9386                 input_set |= pf->hash_input_set[pctype];
9387         }
9388         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9389                                            I40E_INSET_MASK_NUM_REG);
9390         if (num < 0)
9391                 return -EINVAL;
9392
9393         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9394
9395         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9396                                     (uint32_t)(inset_reg & UINT32_MAX));
9397         i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9398                                     (uint32_t)((inset_reg >>
9399                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
9400         i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
9401
9402         for (i = 0; i < num; i++)
9403                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9404                                             mask_reg[i]);
9405         /* clear unused mask registers of the pctype */
9406         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9407                 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9408                                             0);
9409         i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
9410         I40E_WRITE_FLUSH(hw);
9411
9412         pf->hash_input_set[pctype] = input_set;
9413         return 0;
9414 }
9415
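/*
 * Editor's note: an illustrative sketch, not part of the upstream driver.
 * It shows how an application reaches i40e_hash_filter_inset_select()
 * through the generic filter-ctrl API. port_id is assumed to be a valid
 * i40e port; the chosen flow type and fields are example values.
 */
static int
example_select_hash_inset(uint16_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
        info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.input_set_conf.inset_size = 2;
        info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
        info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
        info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}
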
9416 int
9417 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9418                          struct rte_eth_input_set_conf *conf)
9419 {
9420         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9421         enum i40e_filter_pctype pctype;
9422         uint64_t input_set, inset_reg = 0;
9423         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9424         int ret, i, num;
9425
9426         if (!hw || !conf) {
9427                 PMD_DRV_LOG(ERR, "Invalid pointer");
9428                 return -EFAULT;
9429         }
9430         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9431             conf->op != RTE_ETH_INPUT_SET_ADD) {
9432                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9433                 return -EINVAL;
9434         }
9435
9436         pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9437
9438         if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9439                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9440                 return -EINVAL;
9441         }
9442
9443         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9444                                    conf->inset_size);
9445         if (ret) {
9446                 PMD_DRV_LOG(ERR, "Failed to parse input set");
9447                 return -EINVAL;
9448         }
9449
9450         /* get inset value in register */
9451         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9452         inset_reg <<= I40E_32_BIT_WIDTH;
9453         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9454
9455         /* Cannot change the inset register for flex payload for fdir;
9456          * it is done by writing I40E_PRTQF_FD_FLXINSET
9457          * in i40e_set_flex_mask_on_pctype.
9458          */
9459         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9460                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9461         else
9462                 input_set |= pf->fdir.input_set[pctype];
9463         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9464                                            I40E_INSET_MASK_NUM_REG);
9465         if (num < 0)
9466                 return -EINVAL;
9467         if (pf->support_multi_driver && num > 0) {
9468                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9469                 return -ENOTSUP;
9470         }
9471
9472         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9473
9474         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9475                               (uint32_t)(inset_reg & UINT32_MAX));
9476         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9477                              (uint32_t)((inset_reg >>
9478                              I40E_32_BIT_WIDTH) & UINT32_MAX));
9479
9480         if (!pf->support_multi_driver) {
9481                 for (i = 0; i < num; i++)
9482                         i40e_check_write_global_reg(hw,
9483                                                     I40E_GLQF_FD_MSK(i, pctype),
9484                                                     mask_reg[i]);
9485                 /* clear unused mask registers of the pctype */
9486                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9487                         i40e_check_write_global_reg(hw,
9488                                                     I40E_GLQF_FD_MSK(i, pctype),
9489                                                     0);
9490                 i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
9491         } else {
9492                 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9493         }
9494         I40E_WRITE_FLUSH(hw);
9495
9496         pf->fdir.input_set[pctype] = input_set;
9497         return 0;
9498 }
9499
9500 static int
9501 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9502 {
9503         int ret = 0;
9504
9505         if (!hw || !info) {
9506                 PMD_DRV_LOG(ERR, "Invalid pointer");
9507                 return -EFAULT;
9508         }
9509
9510         switch (info->info_type) {
9511         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9512                 i40e_get_symmetric_hash_enable_per_port(hw,
9513                                         &(info->info.enable));
9514                 break;
9515         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9516                 ret = i40e_get_hash_filter_global_config(hw,
9517                                 &(info->info.global_conf));
9518                 break;
9519         default:
9520                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9521                                                         info->info_type);
9522                 ret = -EINVAL;
9523                 break;
9524         }
9525
9526         return ret;
9527 }
9528
9529 static int
9530 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9531 {
9532         int ret = 0;
9533
9534         if (!hw || !info) {
9535                 PMD_DRV_LOG(ERR, "Invalid pointer");
9536                 return -EFAULT;
9537         }
9538
9539         switch (info->info_type) {
9540         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9541                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9542                 break;
9543         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9544                 ret = i40e_set_hash_filter_global_config(hw,
9545                                 &(info->info.global_conf));
9546                 break;
9547         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9548                 ret = i40e_hash_filter_inset_select(hw,
9549                                                &(info->info.input_set_conf));
9550                 break;
9551
9552         default:
9553                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9554                                                         info->info_type);
9555                 ret = -EINVAL;
9556                 break;
9557         }
9558
9559         return ret;
9560 }
9561
9562 /* Operations for hash function */
9563 static int
9564 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9565                       enum rte_filter_op filter_op,
9566                       void *arg)
9567 {
9568         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9569         int ret = 0;
9570
9571         switch (filter_op) {
9572         case RTE_ETH_FILTER_NOP:
9573                 break;
9574         case RTE_ETH_FILTER_GET:
9575                 ret = i40e_hash_filter_get(hw,
9576                         (struct rte_eth_hash_filter_info *)arg);
9577                 break;
9578         case RTE_ETH_FILTER_SET:
9579                 ret = i40e_hash_filter_set(hw,
9580                         (struct rte_eth_hash_filter_info *)arg);
9581                 break;
9582         default:
9583                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9584                                                                 filter_op);
9585                 ret = -ENOTSUP;
9586                 break;
9587         }
9588
9589         return ret;
9590 }
9591
9592 /* Convert ethertype filter structure */
9593 static int
9594 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9595                               struct i40e_ethertype_filter *filter)
9596 {
9597         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9598         filter->input.ether_type = input->ether_type;
9599         filter->flags = input->flags;
9600         filter->queue = input->queue;
9601
9602         return 0;
9603 }
9604
9605 /* Check if the ethertype filter already exists */
9606 struct i40e_ethertype_filter *
9607 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9608                                 const struct i40e_ethertype_filter_input *input)
9609 {
9610         int ret;
9611
9612         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9613         if (ret < 0)
9614                 return NULL;
9615
9616         return ethertype_rule->hash_map[ret];
9617 }
9618
9619 /* Add ethertype filter in SW list */
9620 static int
9621 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9622                                 struct i40e_ethertype_filter *filter)
9623 {
9624         struct i40e_ethertype_rule *rule = &pf->ethertype;
9625         int ret;
9626
9627         ret = rte_hash_add_key(rule->hash_table, &filter->input);
9628         if (ret < 0) {
9629                 PMD_DRV_LOG(ERR,
9630                             "Failed to insert ethertype filter"
9631                             " to hash table %d!",
9632                             ret);
9633                 return ret;
9634         }
9635         rule->hash_map[ret] = filter;
9636
9637         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9638
9639         return 0;
9640 }
9641
9642 /* Delete ethertype filter in SW list */
9643 int
9644 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9645                              struct i40e_ethertype_filter_input *input)
9646 {
9647         struct i40e_ethertype_rule *rule = &pf->ethertype;
9648         struct i40e_ethertype_filter *filter;
9649         int ret;
9650
9651         ret = rte_hash_del_key(rule->hash_table, input);
9652         if (ret < 0) {
9653                 PMD_DRV_LOG(ERR,
9654                             "Failed to delete ethertype filter"
9655                             " to hash table %d!",
9656                             ret);
9657                 return ret;
9658         }
9659         filter = rule->hash_map[ret];
9660         rule->hash_map[ret] = NULL;
9661
9662         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9663         rte_free(filter);
9664
9665         return 0;
9666 }
9667
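/*
 * Editor's note: an illustrative sketch, not part of the upstream driver.
 * The lookup/insert/delete helpers above assume an rte_hash table keyed by
 * struct i40e_ethertype_filter_input; this shows how such a table is
 * typically created. The name and entry count are example values.
 */
static struct rte_hash *
example_create_ethertype_hash(void)
{
        struct rte_hash_parameters params = {
                .name = "example_ethertype_hash",
                .entries = 1024,
                .key_len = sizeof(struct i40e_ethertype_filter_input),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = 0, /* example NUMA socket */
        };

        return rte_hash_create(&params);
}
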
9668 /*
9669  * Configure an ethertype filter, which can direct packets by filtering
9670  * on MAC address and ether_type, or on ether_type only.
9671  */
9672 int
9673 i40e_ethertype_filter_set(struct i40e_pf *pf,
9674                         struct rte_eth_ethertype_filter *filter,
9675                         bool add)
9676 {
9677         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9678         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9679         struct i40e_ethertype_filter *ethertype_filter, *node;
9680         struct i40e_ethertype_filter check_filter;
9681         struct i40e_control_filter_stats stats;
9682         uint16_t flags = 0;
9683         int ret;
9684
9685         if (filter->queue >= pf->dev_data->nb_rx_queues) {
9686                 PMD_DRV_LOG(ERR, "Invalid queue ID");
9687                 return -EINVAL;
9688         }
9689         if (filter->ether_type == ETHER_TYPE_IPv4 ||
9690                 filter->ether_type == ETHER_TYPE_IPv6) {
9691                 PMD_DRV_LOG(ERR,
9692                         "unsupported ether_type(0x%04x) in control packet filter.",
9693                         filter->ether_type);
9694                 return -EINVAL;
9695         }
9696         if (filter->ether_type == ETHER_TYPE_VLAN)
9697                 PMD_DRV_LOG(WARNING,
9698                         "filter vlan ether_type in first tag is not supported.");
9699
9700         /* Check if there is the filter in SW list */
9701         memset(&check_filter, 0, sizeof(check_filter));
9702         i40e_ethertype_filter_convert(filter, &check_filter);
9703         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9704                                                &check_filter.input);
9705         if (add && node) {
9706                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9707                 return -EINVAL;
9708         }
9709
9710         if (!add && !node) {
9711                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9712                 return -EINVAL;
9713         }
9714
9715         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9716                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9717         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9718                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9719         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9720
9721         memset(&stats, 0, sizeof(stats));
9722         ret = i40e_aq_add_rem_control_packet_filter(hw,
9723                         filter->mac_addr.addr_bytes,
9724                         filter->ether_type, flags,
9725                         pf->main_vsi->seid,
9726                         filter->queue, add, &stats, NULL);
9727
9728         PMD_DRV_LOG(INFO,
9729                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9730                 ret, stats.mac_etype_used, stats.etype_used,
9731                 stats.mac_etype_free, stats.etype_free);
9732         if (ret < 0)
9733                 return -ENOSYS;
9734
9735         /* Add or delete a filter in SW list */
9736         if (add) {
9737                 ethertype_filter = rte_zmalloc("ethertype_filter",
9738                                        sizeof(*ethertype_filter), 0);
9739                 if (ethertype_filter == NULL) {
9740                         PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9741                         return -ENOMEM;
9742                 }
9743
9744                 rte_memcpy(ethertype_filter, &check_filter,
9745                            sizeof(check_filter));
9746                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9747                 if (ret < 0)
9748                         rte_free(ethertype_filter);
9749         } else {
9750                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9751         }
9752
9753         return ret;
9754 }
9755
9756 /*
9757  * Handle operations for ethertype filter.
9758  */
9759 static int
9760 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9761                                 enum rte_filter_op filter_op,
9762                                 void *arg)
9763 {
9764         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9765         int ret = 0;
9766
9767         if (filter_op == RTE_ETH_FILTER_NOP)
9768                 return ret;
9769
9770         if (arg == NULL) {
9771                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9772                             filter_op);
9773                 return -EINVAL;
9774         }
9775
9776         switch (filter_op) {
9777         case RTE_ETH_FILTER_ADD:
9778                 ret = i40e_ethertype_filter_set(pf,
9779                         (struct rte_eth_ethertype_filter *)arg,
9780                         TRUE);
9781                 break;
9782         case RTE_ETH_FILTER_DELETE:
9783                 ret = i40e_ethertype_filter_set(pf,
9784                         (struct rte_eth_ethertype_filter *)arg,
9785                         FALSE);
9786                 break;
9787         default:
9788                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9789                 ret = -ENOSYS;
9790                 break;
9791         }
9792         return ret;
9793 }
9794
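/*
 * Editor's note: an illustrative sketch, not part of the upstream driver.
 * It shows the application-side counterpart of the handler above: adding
 * an ethertype filter that steers ARP frames to a given queue. port_id
 * and queue are example values.
 */
static int
example_add_arp_filter(uint16_t port_id, uint16_t queue)
{
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = ETHER_TYPE_ARP; /* 0x0806 */
        filter.flags = 0; /* match on ether_type only, forward to queue */
        filter.queue = queue;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                       RTE_ETH_FILTER_ADD, &filter);
}
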
9795 static int
9796 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9797                      enum rte_filter_type filter_type,
9798                      enum rte_filter_op filter_op,
9799                      void *arg)
9800 {
9801         int ret = 0;
9802
9803         if (dev == NULL)
9804                 return -EINVAL;
9805
9806         switch (filter_type) {
9807         case RTE_ETH_FILTER_NONE:
9808                 /* For global configuration */
9809                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9810                 break;
9811         case RTE_ETH_FILTER_HASH:
9812                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9813                 break;
9814         case RTE_ETH_FILTER_MACVLAN:
9815                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9816                 break;
9817         case RTE_ETH_FILTER_ETHERTYPE:
9818                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9819                 break;
9820         case RTE_ETH_FILTER_TUNNEL:
9821                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9822                 break;
9823         case RTE_ETH_FILTER_FDIR:
9824                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9825                 break;
9826         case RTE_ETH_FILTER_GENERIC:
9827                 if (filter_op != RTE_ETH_FILTER_GET)
9828                         return -EINVAL;
9829                 *(const void **)arg = &i40e_flow_ops;
9830                 break;
9831         default:
9832                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9833                                                         filter_type);
9834                 ret = -EINVAL;
9835                 break;
9836         }
9837
9838         return ret;
9839 }
9840
9841 /*
9842  * Check and enable Extended Tag.
9843  * Enabling Extended Tag is important for 40G performance.
9844  */
9845 static void
9846 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9847 {
9848         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9849         uint32_t buf = 0;
9850         int ret;
9851
9852         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9853                                       PCI_DEV_CAP_REG);
9854         if (ret < 0) {
9855                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9856                             PCI_DEV_CAP_REG);
9857                 return;
9858         }
9859         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9860                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9861                 return;
9862         }
9863
9864         buf = 0;
9865         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9866                                       PCI_DEV_CTRL_REG);
9867         if (ret < 0) {
9868                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9869                             PCI_DEV_CTRL_REG);
9870                 return;
9871         }
9872         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9873                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9874                 return;
9875         }
9876         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9877         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9878                                        PCI_DEV_CTRL_REG);
9879         if (ret < 0) {
9880                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9881                             PCI_DEV_CTRL_REG);
9882                 return;
9883         }
9884 }
9885
9886 /*
9887  * As some registers are not reset except by a global hardware reset,
9888  * hardware initialization is needed to put those registers into an
9889  * expected initial state.
9890  */
9891 static void
9892 i40e_hw_init(struct rte_eth_dev *dev)
9893 {
9894         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9895
9896         i40e_enable_extended_tag(dev);
9897
9898         /* clear the PF Queue Filter control register */
9899         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9900
9901         /* Disable symmetric hash per port */
9902         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9903 }
9904
9905 /*
9906  * On X722 it is possible to have multiple pctypes mapped to the same
9907  * flow type; however, this function returns only the highest pctype
9908  * index, which is not quite correct. This is a known problem of the
9909  * i40e driver and needs to be fixed later.
9910  */
9911 enum i40e_filter_pctype
9912 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9913 {
9914         int i;
9915         uint64_t pctype_mask;
9916
9917         if (flow_type < I40E_FLOW_TYPE_MAX) {
9918                 pctype_mask = adapter->pctypes_tbl[flow_type];
9919                 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9920                         if (pctype_mask & (1ULL << i))
9921                                 return (enum i40e_filter_pctype)i;
9922                 }
9923         }
9924         return I40E_FILTER_PCTYPE_INVALID;
9925 }
9926
9927 uint16_t
9928 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9929                         enum i40e_filter_pctype pctype)
9930 {
9931         uint16_t flowtype;
9932         uint64_t pctype_mask = 1ULL << pctype;
9933
9934         for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9935              flowtype++) {
9936                 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9937                         return flowtype;
9938         }
9939
9940         return RTE_ETH_FLOW_UNKNOWN;
9941 }
9942
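/*
 * Editor's note: an illustrative sketch, not part of the upstream driver.
 * For a flow type backed by a single pctype the two mappings above
 * round-trip; on X722, where several pctypes may share one flow type,
 * only the highest pctype is returned, as noted above. The function name
 * is hypothetical.
 */
static int
example_flowtype_roundtrip(const struct i40e_adapter *adapter,
                           uint16_t flow_type)
{
        enum i40e_filter_pctype pctype =
                i40e_flowtype_to_pctype(adapter, flow_type);

        if (pctype == I40E_FILTER_PCTYPE_INVALID)
                return -1;

        return i40e_pctype_to_flowtype(adapter, pctype) == flow_type ? 0 : -1;
}
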
9943 /*
9944  * On X710, the performance number is far below expectation on recent
9945  * firmware versions. The same is true on XL710 if promiscuous mode is
9946  * disabled, or if promiscuous mode is enabled and the port MAC address
9947  * equals the packet destination MAC address. The fix for this issue may
9948  * not be integrated in the following firmware version, so a workaround
9949  * in the software driver is needed: modify the initial values of three
9950  * internal-only registers on both X710 and XL710. Note that the values
9951  * for X710 and XL710 may differ, and the workaround can be removed once
9952  * this is fixed in firmware.
9953  */
9954
9955 /* For both X710 and XL710 */
9956 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1      0x10000200
9957 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2      0x203F0200
9958 #define I40E_GL_SWR_PRI_JOIN_MAP_0              0x26CE00
9959
9960 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9961 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9962
9963 /* For X722 */
9964 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9965 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9966
9967 /* For X710 */
9968 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9969 /* For XL710 */
9970 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9971 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9972
9973 static int
9974 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9975 {
9976         enum i40e_status_code status;
9977         struct i40e_aq_get_phy_abilities_resp phy_ab;
9978         int ret = -ENOTSUP;
9979         int retries = 0;
9980
9981         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9982                                               NULL);
9983
9984         while (status) {
9985                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9986                         status);
9987                 retries++;
9988                 rte_delay_us(100000);
9989                 if (retries < 5)
9990                         status = i40e_aq_get_phy_capabilities(hw, false,
9991                                         true, &phy_ab, NULL);
9992                 else
9993                         return ret;
9994         }
9995         return 0;
9996 }
9997
9998 static void
9999 i40e_configure_registers(struct i40e_hw *hw)
10000 {
10001         static struct {
10002                 uint32_t addr;
10003                 uint64_t val;
10004         } reg_table[] = {
10005                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10006                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10007                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10008         };
10009         uint64_t reg;
10010         uint32_t i;
10011         int ret;
10012
10013         for (i = 0; i < RTE_DIM(reg_table); i++) {
10014                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10015                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10016                                 reg_table[i].val =
10017                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10018                         else /* For X710/XL710/XXV710 */
10019                                 if (hw->aq.fw_maj_ver < 6)
10020                                         reg_table[i].val =
10021                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10022                                 else
10023                                         reg_table[i].val =
10024                                              I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10025                 }
10026
10027                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10028                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10029                                 reg_table[i].val =
10030                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10031                         else /* For X710/XL710/XXV710 */
10032                                 reg_table[i].val =
10033                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10034                 }
10035
10036                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10037                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
10038                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
10039                                 reg_table[i].val =
10040                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
10041                         else /* For X710 */
10042                                 reg_table[i].val =
10043                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
10044                 }
10045
10046                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10047                                                         &reg, NULL);
10048                 if (ret < 0) {
10049                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10050                                                         reg_table[i].addr);
10051                         break;
10052                 }
10053                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10054                                                 reg_table[i].addr, reg);
10055                 if (reg == reg_table[i].val)
10056                         continue;
10057
10058                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10059                                                 reg_table[i].val, NULL);
10060                 if (ret < 0) {
10061                         PMD_DRV_LOG(ERR,
10062                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10063                                 reg_table[i].val, reg_table[i].addr);
10064                         break;
10065                 }
10066                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10067                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10068         }
10069 }
10070
10071 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
10072 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10073 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10074 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10075 static int
10076 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10077 {
10078         uint32_t reg;
10079         int ret;
10080
10081         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10082                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10083                 return -EINVAL;
10084         }
10085
10086         /* Configure for double VLAN RX stripping */
10087         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10088         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10089                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10090                 ret = i40e_aq_debug_write_register(hw,
10091                                                    I40E_VSI_TSR(vsi->vsi_id),
10092                                                    reg, NULL);
10093                 if (ret < 0) {
10094                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10095                                     vsi->vsi_id);
10096                         return I40E_ERR_CONFIG;
10097                 }
10098         }
10099
10100         /* Configure for double VLAN TX insertion */
10101         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10102         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10103                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10104                 ret = i40e_aq_debug_write_register(hw,
10105                                                    I40E_VSI_L2TAGSTXVALID(
10106                                                    vsi->vsi_id), reg, NULL);
10107                 if (ret < 0) {
10108                         PMD_DRV_LOG(ERR,
10109                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
10110                                 vsi->vsi_id);
10111                         return I40E_ERR_CONFIG;
10112                 }
10113         }
10114
10115         return 0;
10116 }
10117
10118 /**
10119  * i40e_aq_add_mirror_rule
10120  * @hw: pointer to the hardware structure
10121  * @seid: VEB seid to add mirror rule to
10122  * @dst_id: destination VSI seid
10123  * @entries: buffer which contains the entities to be mirrored
10124  * @count: number of entities contained in the buffer
10125  * @rule_id: the rule_id of the rule to be added
10126  *
10127  * Add a mirror rule for a given VEB.
10128  *
10129  **/
10130 static enum i40e_status_code
10131 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10132                         uint16_t seid, uint16_t dst_id,
10133                         uint16_t rule_type, uint16_t *entries,
10134                         uint16_t count, uint16_t *rule_id)
10135 {
10136         struct i40e_aq_desc desc;
10137         struct i40e_aqc_add_delete_mirror_rule cmd;
10138         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10139                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10140                 &desc.params.raw;
10141         uint16_t buff_len;
10142         enum i40e_status_code status;
10143
10144         i40e_fill_default_direct_cmd_desc(&desc,
10145                                           i40e_aqc_opc_add_mirror_rule);
10146         memset(&cmd, 0, sizeof(cmd));
10147
10148         buff_len = sizeof(uint16_t) * count;
10149         desc.datalen = rte_cpu_to_le_16(buff_len);
10150         if (buff_len > 0)
10151                 desc.flags |= rte_cpu_to_le_16(
10152                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10153         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10154                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10155         cmd.num_entries = rte_cpu_to_le_16(count);
10156         cmd.seid = rte_cpu_to_le_16(seid);
10157         cmd.destination = rte_cpu_to_le_16(dst_id);
10158
10159         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10160         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10161         PMD_DRV_LOG(INFO,
10162                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10163                 hw->aq.asq_last_status, resp->rule_id,
10164                 resp->mirror_rules_used, resp->mirror_rules_free);
10165         *rule_id = rte_le_to_cpu_16(resp->rule_id);
10166
10167         return status;
10168 }
10169
10170 /**
10171  * i40e_aq_del_mirror_rule
10172  * @hw: pointer to the hardware structure
10173  * @seid: VEB seid to delete the mirror rule from
10174  * @entries: buffer which contains the entities to be mirrored
10175  * @count: number of entities contained in the buffer
10176  * @rule_id: the rule_id of the rule to be deleted
10177  *
10178  * Delete a mirror rule for a given VEB.
10179  *
10180  **/
10181 static enum i40e_status_code
10182 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10183                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10184                 uint16_t count, uint16_t rule_id)
10185 {
10186         struct i40e_aq_desc desc;
10187         struct i40e_aqc_add_delete_mirror_rule cmd;
10188         uint16_t buff_len = 0;
10189         enum i40e_status_code status;
10190         void *buff = NULL;
10191
10192         i40e_fill_default_direct_cmd_desc(&desc,
10193                                           i40e_aqc_opc_delete_mirror_rule);
10194         memset(&cmd, 0, sizeof(cmd));
10195         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10196                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10197                                                           I40E_AQ_FLAG_RD));
10198                 cmd.num_entries = count;
10199                 buff_len = sizeof(uint16_t) * count;
10200                 desc.datalen = rte_cpu_to_le_16(buff_len);
10201                 buff = (void *)entries;
10202         } else
10203                 /* rule id is filled into the destination field when deleting a mirror rule */
10204                 cmd.destination = rte_cpu_to_le_16(rule_id);
10205
10206         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10207                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10208         cmd.seid = rte_cpu_to_le_16(seid);
10209
10210         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10211         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10212
10213         return status;
10214 }
10215
10216 /**
10217  * i40e_mirror_rule_set
10218  * @dev: pointer to the device structure
10219  * @mirror_conf: mirror rule info
10220  * @sw_id: mirror rule's sw_id
10221  * @on: enable/disable
10222  *
10223  * set a mirror rule.
10224  *
10225  **/
10226 static int
10227 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10228                         struct rte_eth_mirror_conf *mirror_conf,
10229                         uint8_t sw_id, uint8_t on)
10230 {
10231         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10232         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10233         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10234         struct i40e_mirror_rule *parent = NULL;
10235         uint16_t seid, dst_seid, rule_id;
10236         uint16_t i, j = 0;
10237         int ret;
10238
10239         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10240
10241         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10242                 PMD_DRV_LOG(ERR,
10243                         "mirror rule can not be configured without veb or vfs.");
10244                 return -ENOSYS;
10245         }
10246         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10247                 PMD_DRV_LOG(ERR, "mirror table is full.");
10248                 return -ENOSPC;
10249         }
10250         if (mirror_conf->dst_pool > pf->vf_num) {
10251                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10252                                  mirror_conf->dst_pool);
10253                 return -EINVAL;
10254         }
10255
10256         seid = pf->main_vsi->veb->seid;
10257
10258         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10259                 if (sw_id <= it->index) {
10260                         mirr_rule = it;
10261                         break;
10262                 }
10263                 parent = it;
10264         }
10265         if (mirr_rule && sw_id == mirr_rule->index) {
10266                 if (on) {
10267                         PMD_DRV_LOG(ERR, "mirror rule exists.");
10268                         return -EEXIST;
10269                 } else {
10270                         ret = i40e_aq_del_mirror_rule(hw, seid,
10271                                         mirr_rule->rule_type,
10272                                         mirr_rule->entries,
10273                                         mirr_rule->num_entries, mirr_rule->id);
10274                         if (ret < 0) {
10275                                 PMD_DRV_LOG(ERR,
10276                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
10277                                         ret, hw->aq.asq_last_status);
10278                                 return -ENOSYS;
10279                         }
10280                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10281                         rte_free(mirr_rule);
10282                         pf->nb_mirror_rule--;
10283                         return 0;
10284                 }
10285         } else if (!on) {
10286                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10287                 return -ENOENT;
10288         }
10289
10290         mirr_rule = rte_zmalloc("i40e_mirror_rule",
10291                                 sizeof(struct i40e_mirror_rule), 0);
10292         if (!mirr_rule) {
10293                 PMD_DRV_LOG(ERR, "failed to allocate memory");
10294                 return I40E_ERR_NO_MEMORY;
10295         }
10296         switch (mirror_conf->rule_type) {
10297         case ETH_MIRROR_VLAN:
10298                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10299                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10300                                 mirr_rule->entries[j] =
10301                                         mirror_conf->vlan.vlan_id[i];
10302                                 j++;
10303                         }
10304                 }
10305                 if (j == 0) {
10306                         PMD_DRV_LOG(ERR, "vlan is not specified.");
10307                         rte_free(mirr_rule);
10308                         return -EINVAL;
10309                 }
10310                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10311                 break;
10312         case ETH_MIRROR_VIRTUAL_POOL_UP:
10313         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10314                 /* check if the specified pool bit is out of range */
10315                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10316                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
10317                         rte_free(mirr_rule);
10318                         return -EINVAL;
10319                 }
10320                 for (i = 0, j = 0; i < pf->vf_num; i++) {
10321                         if (mirror_conf->pool_mask & (1ULL << i)) {
10322                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10323                                 j++;
10324                         }
10325                 }
10326                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10327                         /* add pf vsi to entries */
10328                         mirr_rule->entries[j] = pf->main_vsi_seid;
10329                         j++;
10330                 }
10331                 if (j == 0) {
10332                         PMD_DRV_LOG(ERR, "pool is not specified.");
10333                         rte_free(mirr_rule);
10334                         return -EINVAL;
10335                 }
10336                 /* egress and ingress in AQ commands mean from the switch, not the port */
10337                 mirr_rule->rule_type =
10338                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10339                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10340                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10341                 break;
10342         case ETH_MIRROR_UPLINK_PORT:
10343                 /* egress and ingress in AQ commands mean from the switch, not the port */
10344                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10345                 break;
10346         case ETH_MIRROR_DOWNLINK_PORT:
10347                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10348                 break;
10349         default:
10350                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10351                         mirror_conf->rule_type);
10352                 rte_free(mirr_rule);
10353                 return -EINVAL;
10354         }
10355
10356         /* If the dst_pool is equal to vf_num, consider it as PF */
10357         if (mirror_conf->dst_pool == pf->vf_num)
10358                 dst_seid = pf->main_vsi_seid;
10359         else
10360                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10361
10362         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10363                                       mirr_rule->rule_type, mirr_rule->entries,
10364                                       j, &rule_id);
10365         if (ret < 0) {
10366                 PMD_DRV_LOG(ERR,
10367                         "failed to add mirror rule: ret = %d, aq_err = %d.",
10368                         ret, hw->aq.asq_last_status);
10369                 rte_free(mirr_rule);
10370                 return -ENOSYS;
10371         }
10372
10373         mirr_rule->index = sw_id;
10374         mirr_rule->num_entries = j;
10375         mirr_rule->id = rule_id;
10376         mirr_rule->dst_vsi_seid = dst_seid;
10377
10378         if (parent)
10379                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10380         else
10381                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10382
10383         pf->nb_mirror_rule++;
10384         return 0;
10385 }
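
/*
 * Worked example (illustrative only): with rule_type = ETH_MIRROR_VLAN,
 * vlan.vlan_mask = 0x5 and vlan.vlan_id = {10, 20, 30, 40}, the loop
 * above selects mask bits 0 and 2, so mirr_rule->entries becomes
 * {10, 30} and j = 2 entries are handed to i40e_aq_add_mirror_rule().
 */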
10386
10387 /**
10388  * i40e_mirror_rule_reset
10389  * @dev: pointer to the device
10390  * @sw_id: mirror rule's sw_id
10391  *
10392  * reset a mirror rule.
10393  *
10394  **/
10395 static int
10396 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10397 {
10398         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10399         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10400         struct i40e_mirror_rule *it, *mirr_rule = NULL;
10401         uint16_t seid;
10402         int ret;
10403
10404         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10405
10406         seid = pf->main_vsi->veb->seid;
10407
10408         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10409                 if (sw_id == it->index) {
10410                         mirr_rule = it;
10411                         break;
10412                 }
10413         }
10414         if (mirr_rule) {
10415                 ret = i40e_aq_del_mirror_rule(hw, seid,
10416                                 mirr_rule->rule_type,
10417                                 mirr_rule->entries,
10418                                 mirr_rule->num_entries, mirr_rule->id);
10419                 if (ret < 0) {
10420                         PMD_DRV_LOG(ERR,
10421                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
10422                                 ret, hw->aq.asq_last_status);
10423                         return -ENOSYS;
10424                 }
10425                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10426                 rte_free(mirr_rule);
10427                 pf->nb_mirror_rule--;
10428         } else {
10429                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10430                 return -ENOENT;
10431         }
10432         return 0;
10433 }
10434
10435 static uint64_t
10436 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10437 {
10438         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10439         uint64_t systim_cycles;
10440
10441         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10442         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10443                         << 32;
10444
10445         return systim_cycles;
10446 }
10447
10448 static uint64_t
10449 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10450 {
10451         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10452         uint64_t rx_tstamp;
10453
10454         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10455         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10456                         << 32;
10457
10458         return rx_tstamp;
10459 }
10460
10461 static uint64_t
10462 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10463 {
10464         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10465         uint64_t tx_tstamp;
10466
10467         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10468         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10469                         << 32;
10470
10471         return tx_tstamp;
10472 }
10473
10474 static void
10475 i40e_start_timecounters(struct rte_eth_dev *dev)
10476 {
10477         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10478         struct i40e_adapter *adapter =
10479                         (struct i40e_adapter *)dev->data->dev_private;
10480         struct rte_eth_link link;
10481         uint32_t tsync_inc_l;
10482         uint32_t tsync_inc_h;
10483
10484         /* Get current link speed. */
10485         i40e_dev_link_update(dev, 1);
10486         rte_eth_linkstatus_get(dev, &link);
10487
10488         switch (link.link_speed) {
10489         case ETH_SPEED_NUM_40G:
10490                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10491                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10492                 break;
10493         case ETH_SPEED_NUM_10G:
10494                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10495                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10496                 break;
10497         case ETH_SPEED_NUM_1G:
10498                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10499                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10500                 break;
10501         default:
10502                 tsync_inc_l = 0x0;
10503                 tsync_inc_h = 0x0;
10504         }
10505
10506         /* Set the timesync increment value. */
10507         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10508         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10509
10510         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10511         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10512         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10513
10514         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10515         adapter->systime_tc.cc_shift = 0;
10516         adapter->systime_tc.nsec_mask = 0;
10517
10518         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10519         adapter->rx_tstamp_tc.cc_shift = 0;
10520         adapter->rx_tstamp_tc.nsec_mask = 0;
10521
10522         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10523         adapter->tx_tstamp_tc.cc_shift = 0;
10524         adapter->tx_tstamp_tc.nsec_mask = 0;
10525 }
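
/*
 * Sketch of how the timecounters initialized above are consumed
 * (illustrative, based on the rte_timecounter helpers in rte_time.h):
 * with cc_shift = 0 and nsec_mask = 0, the raw PRTTSYN cycle values are
 * treated directly as nanoseconds, and rte_timecounter_update() only
 * accumulates masked deltas:
 *
 *   delta = (cycle_now - tc->cycle_last) & tc->cc_mask;
 *   tc->nsec += delta >> tc->cc_shift;    (no-op shift here)
 *   tc->cycle_last = cycle_now;
 *
 * This is also why i40e_timesync_adjust_time() below can apply clock
 * offsets by simply adding the delta to each tc->nsec field.
 */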
10526
10527 static int
10528 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10529 {
10530         struct i40e_adapter *adapter =
10531                         (struct i40e_adapter *)dev->data->dev_private;
10532
10533         adapter->systime_tc.nsec += delta;
10534         adapter->rx_tstamp_tc.nsec += delta;
10535         adapter->tx_tstamp_tc.nsec += delta;
10536
10537         return 0;
10538 }
10539
10540 static int
10541 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10542 {
10543         uint64_t ns;
10544         struct i40e_adapter *adapter =
10545                         (struct i40e_adapter *)dev->data->dev_private;
10546
10547         ns = rte_timespec_to_ns(ts);
10548
10549         /* Set the timecounters to a new value. */
10550         adapter->systime_tc.nsec = ns;
10551         adapter->rx_tstamp_tc.nsec = ns;
10552         adapter->tx_tstamp_tc.nsec = ns;
10553
10554         return 0;
10555 }
10556
10557 static int
10558 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10559 {
10560         uint64_t ns, systime_cycles;
10561         struct i40e_adapter *adapter =
10562                         (struct i40e_adapter *)dev->data->dev_private;
10563
10564         systime_cycles = i40e_read_systime_cyclecounter(dev);
10565         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10566         *ts = rte_ns_to_timespec(ns);
10567
10568         return 0;
10569 }
10570
10571 static int
10572 i40e_timesync_enable(struct rte_eth_dev *dev)
10573 {
10574         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10575         uint32_t tsync_ctl_l;
10576         uint32_t tsync_ctl_h;
10577
10578         /* Stop the timesync system time. */
10579         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10580         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10581         /* Reset the timesync system time value. */
10582         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10583         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10584
10585         i40e_start_timecounters(dev);
10586
10587         /* Clear timesync registers. */
10588         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10589         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10590         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10591         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10592         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10593         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10594
10595         /* Enable timestamping of PTP packets. */
10596         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10597         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10598
10599         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10600         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10601         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10602
10603         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10604         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10605
10606         return 0;
10607 }
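
/*
 * Typical application-side use of the timesync hooks via the generic
 * ethdev API (illustrative sketch, error handling elided):
 *
 *   struct timespec ts;
 *
 *   rte_eth_timesync_enable(port_id);
 *   ...poll until a received packet carries PKT_RX_IEEE1588_TMST...
 *   rte_eth_timesync_read_rx_timestamp(port_id, &ts,
 *                                      pkt->timesync & 0x3);
 *
 * The low two bits of the last argument select one of the four
 * PRTTSYN_RXTIME register pairs, matching "index = flags & 0x03" in
 * i40e_timesync_read_rx_timestamp() below.
 */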
10608
10609 static int
10610 i40e_timesync_disable(struct rte_eth_dev *dev)
10611 {
10612         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10613         uint32_t tsync_ctl_l;
10614         uint32_t tsync_ctl_h;
10615
10616         /* Disable timestamping of transmitted PTP packets. */
10617         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10618         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10619
10620         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10621         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10622
10623         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10624         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10625
10626         /* Reset the timesync increment value. */
10627         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10628         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10629
10630         return 0;
10631 }
10632
10633 static int
10634 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10635                                 struct timespec *timestamp, uint32_t flags)
10636 {
10637         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10638         struct i40e_adapter *adapter =
10639                 (struct i40e_adapter *)dev->data->dev_private;
10640
10641         uint32_t sync_status;
10642         uint32_t index = flags & 0x03;
10643         uint64_t rx_tstamp_cycles;
10644         uint64_t ns;
10645
10646         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10647         if ((sync_status & (1 << index)) == 0)
10648                 return -EINVAL;
10649
10650         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10651         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10652         *timestamp = rte_ns_to_timespec(ns);
10653
10654         return 0;
10655 }
10656
10657 static int
10658 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10659                                 struct timespec *timestamp)
10660 {
10661         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10662         struct i40e_adapter *adapter =
10663                 (struct i40e_adapter *)dev->data->dev_private;
10664
10665         uint32_t sync_status;
10666         uint64_t tx_tstamp_cycles;
10667         uint64_t ns;
10668
10669         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10670         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10671                 return -EINVAL;
10672
10673         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10674         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10675         *timestamp = rte_ns_to_timespec(ns);
10676
10677         return 0;
10678 }
10679
10680 /*
10681  * i40e_parse_dcb_configure - parse dcb configure from user
10682  * @dev: the device being configured
10683  * @dcb_cfg: pointer to the parsed configuration
10684  * @tc_map: bit map of enabled traffic classes
10685  *
10686  * Returns 0 on success, negative value on failure
10687  */
10688 static int
10689 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10690                          struct i40e_dcbx_config *dcb_cfg,
10691                          uint8_t *tc_map)
10692 {
10693         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10694         uint8_t i, tc_bw, bw_lf;
10695
10696         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10697
10698         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10699         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10700                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10701                 return -EINVAL;
10702         }
10703
10704         /* assume each tc has the same bw */
10705         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10706         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10707                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10708         /* to ensure the sum of tcbw is equal to 100 */
10709         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10710         for (i = 0; i < bw_lf; i++)
10711                 dcb_cfg->etscfg.tcbwtable[i]++;
10712
10713         /* assume each tc has the same Transmission Selection Algorithm */
10714         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10715                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10716
10717         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10718                 dcb_cfg->etscfg.prioritytable[i] =
10719                                 dcb_rx_conf->dcb_tc[i];
10720
10721         /* FW needs one App to configure HW */
10722         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10723         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10724         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10725         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10726
10727         if (dcb_rx_conf->nb_tcs == 0)
10728                 *tc_map = 1; /* tc0 only */
10729         else
10730                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10731
10732         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10733                 dcb_cfg->pfc.willing = 0;
10734                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10735                 dcb_cfg->pfc.pfcenable = *tc_map;
10736         }
10737         return 0;
10738 }
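
/*
 * Worked example (illustrative only): with dcb_rx_conf->nb_tcs = 8,
 * each TC first gets 100 / 8 = 12 percent and the remainder
 * 100 % 8 = 4 goes to the four lowest-numbered TCs, giving
 * tcbwtable = {13, 13, 13, 13, 12, 12, 12, 12}, which sums to exactly
 * I40E_MAX_PERCENT.
 */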
10739
10740
10741 static enum i40e_status_code
10742 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10743                               struct i40e_aqc_vsi_properties_data *info,
10744                               uint8_t enabled_tcmap)
10745 {
10746         enum i40e_status_code ret;
10747         int i, total_tc = 0;
10748         uint16_t qpnum_per_tc, bsf, qp_idx;
10749         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10750         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10751         uint16_t used_queues;
10752
10753         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10754         if (ret != I40E_SUCCESS)
10755                 return ret;
10756
10757         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10758                 if (enabled_tcmap & (1 << i))
10759                         total_tc++;
10760         }
10761         if (total_tc == 0)
10762                 total_tc = 1;
10763         vsi->enabled_tc = enabled_tcmap;
10764
10765         /* different VSI types have different queues assigned */
10766         if (vsi->type == I40E_VSI_MAIN)
10767                 used_queues = dev_data->nb_rx_queues -
10768                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10769         else if (vsi->type == I40E_VSI_VMDQ2)
10770                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10771         else {
10772                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10773                 return I40E_ERR_NO_AVAILABLE_VSI;
10774         }
10775
10776         /* Number of queues per enabled TC */
10777         qpnum_per_tc = used_queues / total_tc;
10778         if (qpnum_per_tc == 0) {
10779                 PMD_INIT_LOG(ERR, "number of queues is less than tcs.");
10780                 return I40E_ERR_INVALID_QP_ID;
10781         }
10782         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10783                                 I40E_MAX_Q_PER_TC);
10784         bsf = rte_bsf32(qpnum_per_tc);
10785
10786         /**
10787          * Configure TC and queue mapping parameters, for enabled TC,
10788          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
10789          * default queue will serve it.
10790          */
10791         qp_idx = 0;
10792         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10793                 if (vsi->enabled_tc & (1 << i)) {
10794                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10795                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10796                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10797                         qp_idx += qpnum_per_tc;
10798                 } else
10799                         info->tc_mapping[i] = 0;
10800         }
10801
10802         /* Associate queue number with VSI, keep vsi->nb_qps unchanged */
10803         if (vsi->type == I40E_VSI_SRIOV) {
10804                 info->mapping_flags |=
10805                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10806                 for (i = 0; i < vsi->nb_qps; i++)
10807                         info->queue_mapping[i] =
10808                                 rte_cpu_to_le_16(vsi->base_queue + i);
10809         } else {
10810                 info->mapping_flags |=
10811                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10812                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10813         }
10814         info->valid_sections |=
10815                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10816
10817         return I40E_SUCCESS;
10818 }
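
/*
 * Encoding example (illustrative only): with used_queues = 8 and two
 * enabled TCs, qpnum_per_tc = 4 and bsf = rte_bsf32(4) = 2, so TC1 is
 * mapped as
 *
 *   tc_mapping[1] = (4 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. queue offset 4 with 1 << 2 = 4 queues. i40e_dev_get_dcb_info()
 * further below performs the matching decode.
 */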
10819
10820 /*
10821  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10822  * @veb: VEB to be configured
10823  * @tc_map: enabled TC bitmap
10824  *
10825  * Returns 0 on success, negative value on failure
10826  */
10827 static enum i40e_status_code
10828 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10829 {
10830         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10831         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10832         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10833         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10834         enum i40e_status_code ret = I40E_SUCCESS;
10835         int i;
10836         uint32_t bw_max;
10837
10838         /* Check if enabled_tc is same as existing or new TCs */
10839         if (veb->enabled_tc == tc_map)
10840                 return ret;
10841
10842         /* configure tc bandwidth */
10843         memset(&veb_bw, 0, sizeof(veb_bw));
10844         veb_bw.tc_valid_bits = tc_map;
10845         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10846         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10847                 if (tc_map & BIT_ULL(i))
10848                         veb_bw.tc_bw_share_credits[i] = 1;
10849         }
10850         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10851                                                    &veb_bw, NULL);
10852         if (ret) {
10853                 PMD_INIT_LOG(ERR,
10854                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10855                         hw->aq.asq_last_status);
10856                 return ret;
10857         }
10858
10859         memset(&ets_query, 0, sizeof(ets_query));
10860         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10861                                                    &ets_query, NULL);
10862         if (ret != I40E_SUCCESS) {
10863                 PMD_DRV_LOG(ERR,
10864                         "Failed to get switch_comp ETS configuration %u",
10865                         hw->aq.asq_last_status);
10866                 return ret;
10867         }
10868         memset(&bw_query, 0, sizeof(bw_query));
10869         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10870                                                   &bw_query, NULL);
10871         if (ret != I40E_SUCCESS) {
10872                 PMD_DRV_LOG(ERR,
10873                         "Failed to get switch_comp bandwidth configuration %u",
10874                         hw->aq.asq_last_status);
10875                 return ret;
10876         }
10877
10878         /* store and print out BW info */
10879         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10880         veb->bw_info.bw_max = ets_query.tc_bw_max;
10881         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10882         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10883         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10884                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10885                      I40E_16_BIT_WIDTH);
10886         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10887                 veb->bw_info.bw_ets_share_credits[i] =
10888                                 bw_query.tc_bw_share_credits[i];
10889                 veb->bw_info.bw_ets_credits[i] =
10890                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10891                 /* 4 bits per TC, 4th bit is reserved */
10892                 veb->bw_info.bw_ets_max[i] =
10893                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10894                                   RTE_LEN2MASK(3, uint8_t));
10895                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10896                             veb->bw_info.bw_ets_share_credits[i]);
10897                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10898                             veb->bw_info.bw_ets_credits[i]);
10899                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10900                             veb->bw_info.bw_ets_max[i]);
10901         }
10902
10903         veb->enabled_tc = tc_map;
10904
10905         return ret;
10906 }
10907
10908
10909 /*
10910  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10911  * @vsi: VSI to be configured
10912  * @tc_map: enabled TC bitmap
10913  *
10914  * Returns 0 on success, negative value on failure
10915  */
10916 static enum i40e_status_code
10917 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10918 {
10919         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10920         struct i40e_vsi_context ctxt;
10921         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10922         enum i40e_status_code ret = I40E_SUCCESS;
10923         int i;
10924
10925         /* Check if enabled_tc is same as existing or new TCs */
10926         if (vsi->enabled_tc == tc_map)
10927                 return ret;
10928
10929         /* configure tc bandwidth */
10930         memset(&bw_data, 0, sizeof(bw_data));
10931         bw_data.tc_valid_bits = tc_map;
10932         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10933         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10934                 if (tc_map & BIT_ULL(i))
10935                         bw_data.tc_bw_credits[i] = 1;
10936         }
10937         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10938         if (ret) {
10939                 PMD_INIT_LOG(ERR,
10940                         "AQ command Config VSI BW allocation per TC failed = %d",
10941                         hw->aq.asq_last_status);
10942                 goto out;
10943         }
10944         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10945                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10946
10947         /* Update Queue Pairs Mapping for currently enabled UPs */
10948         ctxt.seid = vsi->seid;
10949         ctxt.pf_num = hw->pf_id;
10950         ctxt.vf_num = 0;
10951         ctxt.uplink_seid = vsi->uplink_seid;
10952         ctxt.info = vsi->info;
10953         i40e_get_cap(hw);
10954         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10955         if (ret)
10956                 goto out;
10957
10958         /* Update the VSI after updating the VSI queue-mapping information */
10959         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10960         if (ret) {
10961                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10962                         hw->aq.asq_last_status);
10963                 goto out;
10964         }
10965         /* update the local VSI info with updated queue map */
10966         rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10967                                         sizeof(vsi->info.tc_mapping));
10968         rte_memcpy(&vsi->info.queue_mapping,
10969                    &ctxt.info.queue_mapping,
10970                    sizeof(vsi->info.queue_mapping));
10971         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10972         vsi->info.valid_sections = 0;
10973
10974         /* query and update current VSI BW information */
10975         ret = i40e_vsi_get_bw_config(vsi);
10976         if (ret) {
10977                 PMD_INIT_LOG(ERR,
10978                          "Failed updating vsi bw info, err %s aq_err %s",
10979                          i40e_stat_str(hw, ret),
10980                          i40e_aq_str(hw, hw->aq.asq_last_status));
10981                 goto out;
10982         }
10983
10984         vsi->enabled_tc = tc_map;
10985
10986 out:
10987         return ret;
10988 }
10989
10990 /*
10991  * i40e_dcb_hw_configure - program the dcb setting to hw
10992  * @pf: pf the configuration is taken on
10993  * @new_cfg: new configuration
10994  * @tc_map: enabled TC bitmap
10995  *
10996  * Returns 0 on success, negative value on failure
10997  */
10998 static enum i40e_status_code
10999 i40e_dcb_hw_configure(struct i40e_pf *pf,
11000                       struct i40e_dcbx_config *new_cfg,
11001                       uint8_t tc_map)
11002 {
11003         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11004         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11005         struct i40e_vsi *main_vsi = pf->main_vsi;
11006         struct i40e_vsi_list *vsi_list;
11007         enum i40e_status_code ret;
11008         int i;
11009         uint32_t val;
11010
11011         /* Use the FW API if FW >= v4.4 */
11012         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11013               (hw->aq.fw_maj_ver >= 5))) {
11014                 PMD_INIT_LOG(ERR,
11015                         "FW < v4.4, can not use FW LLDP API to configure DCB");
11016                 return I40E_ERR_FIRMWARE_API_VERSION;
11017         }
11018
11019         /* Check whether reconfiguration is needed */
11020         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11021                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11022                 return I40E_SUCCESS;
11023         }
11024
11025         /* Copy the new config to the current config */
11026         *old_cfg = *new_cfg;
11027         old_cfg->etsrec = old_cfg->etscfg;
11028         ret = i40e_set_dcb_config(hw);
11029         if (ret) {
11030                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11031                          i40e_stat_str(hw, ret),
11032                          i40e_aq_str(hw, hw->aq.asq_last_status));
11033                 return ret;
11034         }
11035         /* set receive Arbiter to RR mode and ETS scheme by default */
11036         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11037                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11038                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11039                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11040                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11041                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11042                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11043                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11044                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11045                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11046                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11047                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11048                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11049         }
11050         /* get local mib to check whether it is configured correctly */
11051         /* IEEE mode */
11052         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11053         /* Get Local DCB Config */
11054         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11055                                      &hw->local_dcbx_config);
11056
11057         /* if a VEB is created, its TC needs to be updated first */
11058         if (main_vsi->veb) {
11059                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11060                 if (ret)
11061                         PMD_INIT_LOG(WARNING,
11062                                  "Failed configuring TC for VEB seid=%d",
11063                                  main_vsi->veb->seid);
11064         }
11065         /* Update each VSI */
11066         i40e_vsi_config_tc(main_vsi, tc_map);
11067         if (main_vsi->veb) {
11068                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11069                         /* Besides the main VSI and VMDQ VSIs, only
11070                          * enable the default TC for other VSIs
11071                          */
11072                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11073                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11074                                                          tc_map);
11075                         else
11076                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
11077                                                          I40E_DEFAULT_TCMAP);
11078                         if (ret)
11079                                 PMD_INIT_LOG(WARNING,
11080                                         "Failed configuring TC for VSI seid=%d",
11081                                         vsi_list->vsi->seid);
11082                         /* continue */
11083                 }
11084         }
11085         return I40E_SUCCESS;
11086 }
11087
11088 /*
11089  * i40e_dcb_init_configure - initial dcb config
11090  * @dev: device being configured
11091  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11092  *
11093  * Returns 0 on success, negative value on failure
11094  */
11095 int
11096 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11097 {
11098         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11099         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11100         int i, ret = 0;
11101
11102         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11103                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11104                 return -ENOTSUP;
11105         }
11106
11107         /* DCB initialization:
11108          * Update DCB configuration from the Firmware and configure
11109          * LLDP MIB change event.
11110          */
11111         if (sw_dcb == TRUE) {
11112                 ret = i40e_init_dcb(hw);
11113                 /* If the lldp agent is stopped, i40e_init_dcb is
11114                  * expected to fail with adminq status I40E_AQ_RC_EPERM.
11115                  * Otherwise, it should succeed.
11116                  */
11117                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11118                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11119                         memset(&hw->local_dcbx_config, 0,
11120                                 sizeof(struct i40e_dcbx_config));
11121                         /* set dcb default configuration */
11122                         hw->local_dcbx_config.etscfg.willing = 0;
11123                         hw->local_dcbx_config.etscfg.maxtcs = 0;
11124                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11125                         hw->local_dcbx_config.etscfg.tsatable[0] =
11126                                                 I40E_IEEE_TSA_ETS;
11127                         /* all UPs mapping to TC0 */
11128                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11129                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11130                         hw->local_dcbx_config.etsrec =
11131                                 hw->local_dcbx_config.etscfg;
11132                         hw->local_dcbx_config.pfc.willing = 0;
11133                         hw->local_dcbx_config.pfc.pfccap =
11134                                                 I40E_MAX_TRAFFIC_CLASS;
11135                         /* FW needs one App to configure HW */
11136                         hw->local_dcbx_config.numapps = 1;
11137                         hw->local_dcbx_config.app[0].selector =
11138                                                 I40E_APP_SEL_ETHTYPE;
11139                         hw->local_dcbx_config.app[0].priority = 3;
11140                         hw->local_dcbx_config.app[0].protocolid =
11141                                                 I40E_APP_PROTOID_FCOE;
11142                         ret = i40e_set_dcb_config(hw);
11143                         if (ret) {
11144                                 PMD_INIT_LOG(ERR,
11145                                         "default dcb config fails. err = %d, aq_err = %d.",
11146                                         ret, hw->aq.asq_last_status);
11147                                 return -ENOSYS;
11148                         }
11149                 } else {
11150                         PMD_INIT_LOG(ERR,
11151                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
11152                                 ret, hw->aq.asq_last_status);
11153                         return -ENOTSUP;
11154                 }
11155         } else {
11156                 ret = i40e_aq_start_lldp(hw, NULL);
11157                 if (ret != I40E_SUCCESS)
11158                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11159
11160                 ret = i40e_init_dcb(hw);
11161                 if (!ret) {
11162                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11163                                 PMD_INIT_LOG(ERR,
11164                                         "HW doesn't support DCBX offload.");
11165                                 return -ENOTSUP;
11166                         }
11167                 } else {
11168                         PMD_INIT_LOG(ERR,
11169                                 "DCBX configuration failed, err = %d, aq_err = %d.",
11170                                 ret, hw->aq.asq_last_status);
11171                         return -ENOTSUP;
11172                 }
11173         }
11174         return 0;
11175 }
11176
11177 /*
11178  * i40e_dcb_setup - setup dcb related config
11179  * @dev: device being configured
11180  *
11181  * Returns 0 on success, negative value on failure
11182  */
11183 static int
11184 i40e_dcb_setup(struct rte_eth_dev *dev)
11185 {
11186         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11187         struct i40e_dcbx_config dcb_cfg;
11188         uint8_t tc_map = 0;
11189         int ret = 0;
11190
11191         if ((pf->flags & I40E_FLAG_DCB) == 0) {
11192                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11193                 return -ENOTSUP;
11194         }
11195
11196         if (pf->vf_num != 0)
11197                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
11198
11199         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11200         if (ret) {
11201                 PMD_INIT_LOG(ERR, "invalid dcb config");
11202                 return -EINVAL;
11203         }
11204         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11205         if (ret) {
11206                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
11207                 return -ENOSYS;
11208         }
11209
11210         return 0;
11211 }
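
/*
 * Application-side configuration that reaches i40e_dcb_setup() through
 * device configuration (illustrative sketch):
 *
 *   struct rte_eth_conf conf = {
 *           .rxmode = { .mq_mode = ETH_MQ_RX_DCB },
 *           .rx_adv_conf.dcb_rx_conf = {
 *                   .nb_tcs = ETH_4_TCS,
 *                   .dcb_tc = { 0, 1, 2, 3, 0, 1, 2, 3 },
 *           },
 *           .dcb_capability_en = ETH_DCB_PFC_SUPPORT,
 *   };
 *
 * i40e_parse_dcb_configure() then splits bandwidth evenly across the
 * four TCs and enables PFC for the same TC bitmap.
 */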
11212
11213 static int
11214 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11215                       struct rte_eth_dcb_info *dcb_info)
11216 {
11217         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11218         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11219         struct i40e_vsi *vsi = pf->main_vsi;
11220         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11221         uint16_t bsf, tc_mapping;
11222         int i, j = 0;
11223
11224         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11225                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11226         else
11227                 dcb_info->nb_tcs = 1;
11228         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11229                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11230         for (i = 0; i < dcb_info->nb_tcs; i++)
11231                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11232
11233         /* get queue mapping if vmdq is disabled */
11234         if (!pf->nb_cfg_vmdq_vsi) {
11235                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11236                         if (!(vsi->enabled_tc & (1 << i)))
11237                                 continue;
11238                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11239                         dcb_info->tc_queue.tc_rxq[j][i].base =
11240                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11241                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11242                         dcb_info->tc_queue.tc_txq[j][i].base =
11243                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11244                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11245                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11246                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11247                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11248                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11249                 }
11250                 return 0;
11251         }
11252
11253         /* get queue mapping if vmdq is enabled */
11254         do {
11255                 vsi = pf->vmdq[j].vsi;
11256                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11257                         if (!(vsi->enabled_tc & (1 << i)))
11258                                 continue;
11259                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11260                         dcb_info->tc_queue.tc_rxq[j][i].base =
11261                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11262                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11263                         dcb_info->tc_queue.tc_txq[j][i].base =
11264                                 dcb_info->tc_queue.tc_rxq[j][i].base;
11265                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11266                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11267                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11268                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11269                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11270                 }
11271                 j++;
11272         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11273         return 0;
11274 }
11275
11276 static int
11277 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11278 {
11279         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11280         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11281         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11282         uint16_t msix_intr;
11283
11284         msix_intr = intr_handle->intr_vec[queue_id];
11285         if (msix_intr == I40E_MISC_VEC_ID)
11286                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11287                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
11288                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11289                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11290         else
11291                 I40E_WRITE_REG(hw,
11292                                I40E_PFINT_DYN_CTLN(msix_intr -
11293                                                    I40E_RX_VEC_START),
11294                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
11295                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11296                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11297
11298         I40E_WRITE_FLUSH(hw);
11299         rte_intr_enable(&pci_dev->intr_handle);
11300
11301         return 0;
11302 }
11303
11304 static int
11305 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11306 {
11307         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11308         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11309         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11310         uint16_t msix_intr;
11311
11312         msix_intr = intr_handle->intr_vec[queue_id];
11313         if (msix_intr == I40E_MISC_VEC_ID)
11314                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11315                                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11316         else
11317                 I40E_WRITE_REG(hw,
11318                                I40E_PFINT_DYN_CTLN(msix_intr -
11319                                                    I40E_RX_VEC_START),
11320                                I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11321         I40E_WRITE_FLUSH(hw);
11322
11323         return 0;
11324 }
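
/*
 * The two hooks above back rte_eth_dev_rx_intr_enable()/_disable(). A
 * typical interrupt-driven receive loop looks like (illustrative
 * sketch, in the style of l3fwd-power):
 *
 *   rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   while (running) {
 *           if (rte_eth_rx_burst(port_id, queue_id, pkts, N) == 0) {
 *                   rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *                   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *                   rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *           }
 *   }
 */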
11325
11326 static int i40e_get_regs(struct rte_eth_dev *dev,
11327                          struct rte_dev_reg_info *regs)
11328 {
11329         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11330         uint32_t *ptr_data = regs->data;
11331         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11332         const struct i40e_reg_info *reg_info;
11333
11334         if (ptr_data == NULL) {
11335                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11336                 regs->width = sizeof(uint32_t);
11337                 return 0;
11338         }
11339
11340         /* The first few registers have to be read using AQ operations */
11341         reg_idx = 0;
11342         while (i40e_regs_adminq[reg_idx].name) {
11343                 reg_info = &i40e_regs_adminq[reg_idx++];
11344                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11345                         for (arr_idx2 = 0;
11346                                         arr_idx2 <= reg_info->count2;
11347                                         arr_idx2++) {
11348                                 reg_offset = arr_idx * reg_info->stride1 +
11349                                         arr_idx2 * reg_info->stride2;
11350                                 reg_offset += reg_info->base_addr;
11351                                 ptr_data[reg_offset >> 2] =
11352                                         i40e_read_rx_ctl(hw, reg_offset);
11353                         }
11354         }
11355
11356         /* The remaining registers can be read using primitives */
11357         reg_idx = 0;
11358         while (i40e_regs_others[reg_idx].name) {
11359                 reg_info = &i40e_regs_others[reg_idx++];
11360                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11361                         for (arr_idx2 = 0;
11362                                         arr_idx2 <= reg_info->count2;
11363                                         arr_idx2++) {
11364                                 reg_offset = arr_idx * reg_info->stride1 +
11365                                         arr_idx2 * reg_info->stride2;
11366                                 reg_offset += reg_info->base_addr;
11367                                 ptr_data[reg_offset >> 2] =
11368                                         I40E_READ_REG(hw, reg_offset);
11369                         }
11370         }
11371
11372         return 0;
11373 }
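
/*
 * Callers use rte_eth_dev_get_reg_info() in two passes (illustrative
 * sketch): a first call with data == NULL only reports length/width,
 * after which the caller allocates the buffer and calls again:
 *
 *   struct rte_dev_reg_info reg = { .data = NULL };
 *
 *   rte_eth_dev_get_reg_info(port_id, &reg);
 *   reg.data = calloc(1, reg.length * reg.width);
 *   rte_eth_dev_get_reg_info(port_id, &reg);
 */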
11374
11375 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11376 {
11377         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11378
11379         /* Convert word count to byte count */
11380         return hw->nvm.sr_size << 1;
11381 }
11382
11383 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11384                            struct rte_dev_eeprom_info *eeprom)
11385 {
11386         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11387         uint16_t *data = eeprom->data;
11388         uint16_t offset, length, cnt_words;
11389         int ret_code;
11390
11391         offset = eeprom->offset >> 1;
11392         length = eeprom->length >> 1;
11393         cnt_words = length;
11394
11395         if (offset > hw->nvm.sr_size ||
11396                 offset + length > hw->nvm.sr_size) {
11397                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11398                 return -EINVAL;
11399         }
11400
11401         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11402
11403         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11404         if (ret_code != I40E_SUCCESS || cnt_words != length) {
11405                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11406                 return -EIO;
11407         }
11408
11409         return 0;
11410 }
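
/*
 * Usage sketch (illustrative): reading 128 bytes of NVM starting at
 * byte offset 0 through the generic API, sized via the length callback
 * above; offset and length are in bytes and converted to 16-bit words
 * internally:
 *
 *   struct rte_dev_eeprom_info info = {
 *           .data = buf,
 *           .offset = 0,
 *           .length = 128,
 *   };
 *
 *   rte_eth_dev_get_eeprom(port_id, &info);
 */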
11411
11412 static int i40e_get_module_info(struct rte_eth_dev *dev,
11413                                 struct rte_eth_dev_module_info *modinfo)
11414 {
11415         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11416         uint32_t sff8472_comp = 0;
11417         uint32_t sff8472_swap = 0;
11418         uint32_t sff8636_rev = 0;
11419         i40e_status status;
11420         uint32_t type = 0;
11421
11422         /* Check if firmware supports reading module EEPROM. */
11423         if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11424                 PMD_DRV_LOG(ERR,
11425                             "Module EEPROM memory read not supported. "
11426                             "Please update the NVM image.\n");
11427                 return -EINVAL;
11428         }
11429
11430         status = i40e_update_link_info(hw);
11431         if (status)
11432                 return -EIO;
11433
11434         if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11435                 PMD_DRV_LOG(ERR,
11436                             "Cannot read module EEPROM memory. "
11437                             "No module connected.\n");
11438                 return -EINVAL;
11439         }
11440
11441         type = hw->phy.link_info.module_type[0];
11442
11443         switch (type) {
11444         case I40E_MODULE_TYPE_SFP:
11445                 status = i40e_aq_get_phy_register(hw,
11446                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11447                                 I40E_I2C_EEPROM_DEV_ADDR,
11448                                 I40E_MODULE_SFF_8472_COMP,
11449                                 &sff8472_comp, NULL);
11450                 if (status)
11451                         return -EIO;
11452
11453                 status = i40e_aq_get_phy_register(hw,
11454                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11455                                 I40E_I2C_EEPROM_DEV_ADDR,
11456                                 I40E_MODULE_SFF_8472_SWAP,
11457                                 &sff8472_swap, NULL);
11458                 if (status)
11459                         return -EIO;
11460
11461                 /* Check if the module requires address swap to access
11462                  * the other EEPROM memory page.
11463                  */
11464                 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11465                         PMD_DRV_LOG(WARNING,
11466                                     "Module address swap to access "
11467                                     "page 0xA2 is not supported.\n");
11468                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11469                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11470                 } else if (sff8472_comp == 0x00) {
11471                         /* Module is not SFF-8472 compliant */
11472                         modinfo->type = RTE_ETH_MODULE_SFF_8079;
11473                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11474                 } else {
11475                         modinfo->type = RTE_ETH_MODULE_SFF_8472;
11476                         modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11477                 }
11478                 break;
11479         case I40E_MODULE_TYPE_QSFP_PLUS:
11480                 /* Read from memory page 0. */
11481                 status = i40e_aq_get_phy_register(hw,
11482                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11483                                 0,
11484                                 I40E_MODULE_REVISION_ADDR,
11485                                 &sff8636_rev, NULL);
11486                 if (status)
11487                         return -EIO;
11488                 /* Determine revision compliance byte */
11489                 if (sff8636_rev > 0x02) {
11490                         /* Module is SFF-8636 compliant */
11491                         modinfo->type = RTE_ETH_MODULE_SFF_8636;
11492                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11493                 } else {
11494                         modinfo->type = RTE_ETH_MODULE_SFF_8436;
11495                         modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11496                 }
11497                 break;
11498         case I40E_MODULE_TYPE_QSFP28:
11499                 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11500                 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11501                 break;
11502         default:
11503                 PMD_DRV_LOG(ERR, "Module type unrecognized\n");
11504                 return -EINVAL;
11505         }
11506         return 0;
11507 }
11508
11509 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11510                                   struct rte_dev_eeprom_info *info)
11511 {
11512         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11513         bool is_sfp = false;
11514         i40e_status status;
11515         uint8_t *data;
11516         uint32_t value = 0;
11517         uint32_t i;
11518
11519         if (!info || !info->length || !info->data)
11520                 return -EINVAL;
11521         data = info->data;
11522         if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11523                 is_sfp = true;
11524
11525         for (i = 0; i < info->length; i++) {
11526                 u32 offset = i + info->offset;
11527                 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11528
11529                 /* Check if we need to access the other memory page */
11530                 if (is_sfp) {
11531                         if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11532                                 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11533                                 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11534                         }
11535                 } else {
11536                         while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11537                                 /* Compute memory page number and offset. */
11538                                 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11539                                 addr++;
11540                         }
11541                 }
11542                 status = i40e_aq_get_phy_register(hw,
11543                                 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11544                                 addr, offset, &value, NULL);
11545                 if (status)
11546                         return -EIO;
11547                 data[i] = (uint8_t)value;
11548         }
11549         return 0;
11550 }
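
/*
 * Address-translation example (illustrative only): for an SFP module, a
 * request at byte offset 300 exceeds RTE_ETH_MODULE_SFF_8079_LEN (256),
 * so the loop above reads offset 300 - 256 = 44 from the second I2C
 * device address, I40E_I2C_EEPROM_DEV_ADDR2 (the 0xA2 page).
 */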
11551
11552 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11553                                      struct ether_addr *mac_addr)
11554 {
11555         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11556         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11557         struct i40e_vsi *vsi = pf->main_vsi;
11558         struct i40e_mac_filter_info mac_filter;
11559         struct i40e_mac_filter *f;
11560         int ret;
11561
11562         if (!is_valid_assigned_ether_addr(mac_addr)) {
11563                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11564                 return -EINVAL;
11565         }
11566
11567         TAILQ_FOREACH(f, &vsi->mac_list, next) {
11568                 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11569                         break;
11570         }
11571
11572         if (f == NULL) {
11573                 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11574                 return -EIO;
11575         }
11576
11577         mac_filter = f->mac_info;
11578         ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11579         if (ret != I40E_SUCCESS) {
11580                 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11581                 return -EIO;
11582         }
11583         memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11584         ret = i40e_vsi_add_mac(vsi, &mac_filter);
11585         if (ret != I40E_SUCCESS) {
11586                 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11587                 return -EIO;
11588         }
11589         memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11590
11591         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11592                                         mac_addr->addr_bytes, NULL);
11593         if (ret != I40E_SUCCESS) {
11594                 PMD_DRV_LOG(ERR, "Failed to change mac");
11595                 return -EIO;
11596         }
11597
11598         return 0;
11599 }
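
/*
 * This hook backs the generic rte_eth_dev_default_mac_addr_set() API
 * (illustrative usage):
 *
 *   struct ether_addr new_mac = {
 *           .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *   };
 *
 *   rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
 *
 * The address must pass is_valid_assigned_ether_addr(), i.e. it may be
 * neither a group (multicast) address nor all zeroes.
 */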
11600
11601 static int
11602 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11603 {
11604         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11605         struct rte_eth_dev_data *dev_data = pf->dev_data;
11606         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11607         int ret = 0;
11608
11609         /* check if mtu is within the allowed range */
11610         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11611                 return -EINVAL;
11612
11613         /* mtu setting is forbidden if port is started */
11614         if (dev_data->dev_started) {
11615                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11616                             dev_data->port_id);
11617                 return -EBUSY;
11618         }
11619
11620         if (frame_size > ETHER_MAX_LEN)
11621                 dev_data->dev_conf.rxmode.offloads |=
11622                         DEV_RX_OFFLOAD_JUMBO_FRAME;
11623         else
11624                 dev_data->dev_conf.rxmode.offloads &=
11625                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11626
11627         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11628
11629         return ret;
11630 }
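/* Usage sketch (hedged): the port must be stopped first, and any frame
 * size above ETHER_MAX_LEN implicitly enables the jumbo-frame Rx offload:
 *
 *     rte_eth_dev_stop(port_id);
 *     if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *             printf("MTU rejected\n");
 *     if (rte_eth_dev_start(port_id) != 0)
 *             printf("failed to restart the port\n");
 */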
11631
11632 /* Restore ethertype filter */
11633 static void
11634 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11635 {
11636         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11637         struct i40e_ethertype_filter_list
11638                 *ethertype_list = &pf->ethertype.ethertype_list;
11639         struct i40e_ethertype_filter *f;
11640         struct i40e_control_filter_stats stats = {0};
11641         uint16_t flags;
11642
11643         TAILQ_FOREACH(f, ethertype_list, rules) {
11644                 flags = 0;
11645                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11646                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11647                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11648                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11649                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11650
11651                 memset(&stats, 0, sizeof(stats));
11652                 i40e_aq_add_rem_control_packet_filter(hw,
11653                                             f->input.mac_addr.addr_bytes,
11654                                             f->input.ether_type,
11655                                             flags, pf->main_vsi->seid,
11656                                             f->queue, 1, &stats, NULL);
11657         }
11658         PMD_DRV_LOG(INFO, "Ethertype filter:"
11659                     " mac_etype_used = %u, etype_used = %u,"
11660                     " mac_etype_free = %u, etype_free = %u",
11661                     stats.mac_etype_used, stats.etype_used,
11662                     stats.mac_etype_free, stats.etype_free);
11663 }
11664
11665 /* Restore tunnel filter */
11666 static void
11667 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11668 {
11669         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11670         struct i40e_vsi *vsi;
11671         struct i40e_pf_vf *vf;
11672         struct i40e_tunnel_filter_list
11673                 *tunnel_list = &pf->tunnel.tunnel_list;
11674         struct i40e_tunnel_filter *f;
11675         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11676         bool big_buffer = false;
11677
11678         TAILQ_FOREACH(f, tunnel_list, rules) {
11679                 if (!f->is_to_vf)
11680                         vsi = pf->main_vsi;
11681                 else {
11682                         vf = &pf->vfs[f->vf_id];
11683                         vsi = vf->vsi;
11684                 }
11685                 memset(&cld_filter, 0, sizeof(cld_filter));
11686                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11687                         (struct ether_addr *)&cld_filter.element.outer_mac);
11688                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11689                         (struct ether_addr *)&cld_filter.element.inner_mac);
11690                 cld_filter.element.inner_vlan = f->input.inner_vlan;
11691                 cld_filter.element.flags = f->input.flags;
11692                 cld_filter.element.tenant_id = f->input.tenant_id;
11693                 cld_filter.element.queue_number = f->queue;
11694                 rte_memcpy(cld_filter.general_fields,
11695                            f->input.general_fields,
11696                            sizeof(f->input.general_fields));
11697
11698                 /* Recompute the flag for each filter so one big-buffer
11699                  * entry cannot force the big-buffer path for the rest.
11700                  */
11701                 big_buffer =
11702                     ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11703                      I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11704                     ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11705                      I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11706                     ((f->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11707                      I40E_AQC_ADD_CLOUD_FILTER_0X10);
11708
11709                 if (big_buffer)
11710                         i40e_aq_add_cloud_filters_big_buffer(hw,
11711                                              vsi->seid, &cld_filter, 1);
11712                 else
11713                         i40e_aq_add_cloud_filters(hw, vsi->seid,
11714                                                   &cld_filter.element, 1);
11715         }
11716 }
11717
11718 /* Restore rss filter */
11719 static inline void
11720 i40e_rss_filter_restore(struct i40e_pf *pf)
11721 {
11722         struct i40e_rte_flow_rss_conf *conf =
11723                                         &pf->rss_info;
11724         if (conf->conf.queue_num)
11725                 i40e_config_rss_filter(pf, conf, TRUE);
11726 }
11727
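/* Replay every software-tracked filter class into hardware. This is meant
 * for paths where the hardware state has been cleared (e.g. device start
 * after a reset) while the driver's filter lists still hold the rules.
 */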
11728 static void
11729 i40e_filter_restore(struct i40e_pf *pf)
11730 {
11731         i40e_ethertype_filter_restore(pf);
11732         i40e_tunnel_filter_restore(pf);
11733         i40e_fdir_filter_restore(pf);
11734         i40e_rss_filter_restore(pf);
11735 }
11736
11737 static bool
11738 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11739 {
11740         if (strcmp(dev->device->driver->name, drv->driver.name))
11741                 return false;
11742
11743         return true;
11744 }
11745
11746 bool
11747 is_i40e_supported(struct rte_eth_dev *dev)
11748 {
11749         return is_device_supported(dev, &rte_i40e_pmd);
11750 }
11751
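/* Look up a customized (DDP package defined) pctype entry, e.g. the
 * GTP-C/GTP-U ones, by its I40E_CUSTOMIZED_* index; returns NULL if the
 * index is not known.
 */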
11752 struct i40e_customized_pctype*
11753 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11754 {
11755         int i;
11756
11757         for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11758                 if (pf->customized_pctype[i].index == index)
11759                         return &pf->customized_pctype[i];
11760         }
11761         return NULL;
11762 }
11763
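/* Walk the pctypes carried in a DDP package: each pctype's protocol chain
 * is flattened into a '_'-joined name (e.g. "GTPU_IPV4") and the matching
 * customized pctype entry is enabled on package add, or invalidated on
 * package delete.
 */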
11764 static int
11765 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11766                               uint32_t pkg_size, uint32_t proto_num,
11767                               struct rte_pmd_i40e_proto_info *proto,
11768                               enum rte_pmd_i40e_package_op op)
11769 {
11770         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11771         uint32_t pctype_num;
11772         struct rte_pmd_i40e_ptype_info *pctype;
11773         uint32_t buff_size;
11774         struct i40e_customized_pctype *new_pctype = NULL;
11775         uint8_t proto_id;
11776         uint8_t pctype_value;
11777         char name[64];
11778         uint32_t i, j, n;
11779         int ret;
11780
11781         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11782             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11783                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11784                 return -1;
11785         }
11786
11787         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11788                                 (uint8_t *)&pctype_num, sizeof(pctype_num),
11789                                 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11790         if (ret) {
11791                 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11792                 return -1;
11793         }
11794         if (!pctype_num) {
11795                 PMD_DRV_LOG(INFO, "No new pctype added");
11796                 return -1;
11797         }
11798
11799         buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11800         pctype = rte_zmalloc("new_pctype", buff_size, 0);
11801         if (!pctype) {
11802                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11803                 return -1;
11804         }
11805         /* get information about new pctype list */
11806         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11807                                         (uint8_t *)pctype, buff_size,
11808                                         RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11809         if (ret) {
11810                 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11811                 rte_free(pctype);
11812                 return -1;
11813         }
11814
11815         /* Update customized pctype. */
11816         for (i = 0; i < pctype_num; i++) {
11817                 pctype_value = pctype[i].ptype_id;
11818                 memset(name, 0, sizeof(name));
11819                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11820                         proto_id = pctype[i].protocols[j];
11821                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11822                                 continue;
11823                         for (n = 0; n < proto_num; n++) {
11824                                 if (proto[n].proto_id != proto_id)
11825                                         continue;
11826                                 strcat(name, proto[n].name);
11827                                 strcat(name, "_");
11828                                 break;
11829                         }
11830                 }
11831                 if (name[0]) name[strlen(name) - 1] = '\0';
11832                 if (!strcmp(name, "GTPC"))
11833                         new_pctype =
11834                                 i40e_find_customized_pctype(pf,
11835                                                       I40E_CUSTOMIZED_GTPC);
11836                 else if (!strcmp(name, "GTPU_IPV4"))
11837                         new_pctype =
11838                                 i40e_find_customized_pctype(pf,
11839                                                    I40E_CUSTOMIZED_GTPU_IPV4);
11840                 else if (!strcmp(name, "GTPU_IPV6"))
11841                         new_pctype =
11842                                 i40e_find_customized_pctype(pf,
11843                                                    I40E_CUSTOMIZED_GTPU_IPV6);
11844                 else if (!strcmp(name, "GTPU"))
11845                         new_pctype =
11846                                 i40e_find_customized_pctype(pf,
11847                                                       I40E_CUSTOMIZED_GTPU);
11848                 if (new_pctype) {
11849                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
11850                                 new_pctype->pctype = pctype_value;
11851                                 new_pctype->valid = true;
11852                         } else {
11853                                 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
11854                                 new_pctype->valid = false;
11855                         }
11856                 }
11857         }
11858
11859         rte_free(pctype);
11860         return 0;
11861 }
11862
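/* Build the hardware-ptype to software-ptype mapping from a DDP package
 * and install it via rte_pmd_i40e_ptype_mapping_update(), or reset the
 * mapping on package delete. Protocol chains are folded left to right:
 * once a tunnel header (OIPV4/OIPV6/GTPC/GTPU/GRENAT/L2TPV2CTL) is seen,
 * later layers map to the RTE_PTYPE_INNER_* variants. For example, the
 * chain OIPV4, GTPU, IPV4, UDP yields
 * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GTPU |
 * RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP.
 */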
11863 static int
11864 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11865                              uint32_t pkg_size, uint32_t proto_num,
11866                              struct rte_pmd_i40e_proto_info *proto,
11867                              enum rte_pmd_i40e_package_op op)
11868 {
11869         struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11870         uint16_t port_id = dev->data->port_id;
11871         uint32_t ptype_num;
11872         struct rte_pmd_i40e_ptype_info *ptype;
11873         uint32_t buff_size;
11874         uint8_t proto_id;
11875         char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11876         uint32_t i, j, n;
11877         bool in_tunnel;
11878         int ret;
11879
11880         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11881             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11882                 PMD_DRV_LOG(ERR, "Unsupported operation.");
11883                 return -1;
11884         }
11885
11886         if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
11887                 rte_pmd_i40e_ptype_mapping_reset(port_id);
11888                 return 0;
11889         }
11890
11891         /* get information about new ptype num */
11892         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11893                                 (uint8_t *)&ptype_num, sizeof(ptype_num),
11894                                 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11895         if (ret) {
11896                 PMD_DRV_LOG(ERR, "Failed to get ptype number");
11897                 return ret;
11898         }
11899         if (!ptype_num) {
11900                 PMD_DRV_LOG(INFO, "No new ptype added");
11901                 return -1;
11902         }
11903
11904         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11905         ptype = rte_zmalloc("new_ptype", buff_size, 0);
11906         if (!ptype) {
11907                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11908                 return -1;
11909         }
11910
11911         /* get information about new ptype list */
11912         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11913                                         (uint8_t *)ptype, buff_size,
11914                                         RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11915         if (ret) {
11916                 PMD_DRV_LOG(ERR, "Failed to get ptype list");
11917                 rte_free(ptype);
11918                 return ret;
11919         }
11920
11921         buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11922         ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11923         if (!ptype_mapping) {
11924                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11925                 rte_free(ptype);
11926                 return -1;
11927         }
11928
11929         /* Update ptype mapping table. */
11930         for (i = 0; i < ptype_num; i++) {
11931                 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11932                 ptype_mapping[i].sw_ptype = 0;
11933                 in_tunnel = false;
11934                 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11935                         proto_id = ptype[i].protocols[j];
11936                         if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11937                                 continue;
11938                         for (n = 0; n < proto_num; n++) {
11939                                 if (proto[n].proto_id != proto_id)
11940                                         continue;
11941                                 memset(name, 0, sizeof(name));
11942                                 strcpy(name, proto[n].name);
11943                                 if (!strncasecmp(name, "PPPOE", 5))
11944                                         ptype_mapping[i].sw_ptype |=
11945                                                 RTE_PTYPE_L2_ETHER_PPPOE;
11946                                 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11947                                          !in_tunnel) {
11948                                         ptype_mapping[i].sw_ptype |=
11949                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11950                                         ptype_mapping[i].sw_ptype |=
11951                                                 RTE_PTYPE_L4_FRAG;
11952                                 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11953                                            in_tunnel) {
11954                                         ptype_mapping[i].sw_ptype |=
11955                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11956                                         ptype_mapping[i].sw_ptype |=
11957                                                 RTE_PTYPE_INNER_L4_FRAG;
11958                                 } else if (!strncasecmp(name, "OIPV4", 5)) {
11959                                         ptype_mapping[i].sw_ptype |=
11960                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11961                                         in_tunnel = true;
11962                                 } else if (!strncasecmp(name, "IPV4", 4) &&
11963                                            !in_tunnel)
11964                                         ptype_mapping[i].sw_ptype |=
11965                                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11966                                 else if (!strncasecmp(name, "IPV4", 4) &&
11967                                          in_tunnel)
11968                                         ptype_mapping[i].sw_ptype |=
11969                                             RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11970                                 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11971                                          !in_tunnel) {
11972                                         ptype_mapping[i].sw_ptype |=
11973                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11974                                         ptype_mapping[i].sw_ptype |=
11975                                                 RTE_PTYPE_L4_FRAG;
11976                                 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11977                                            in_tunnel) {
11978                                         ptype_mapping[i].sw_ptype |=
11979                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11980                                         ptype_mapping[i].sw_ptype |=
11981                                                 RTE_PTYPE_INNER_L4_FRAG;
11982                                 } else if (!strncasecmp(name, "OIPV6", 5)) {
11983                                         ptype_mapping[i].sw_ptype |=
11984                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11985                                         in_tunnel = true;
11986                                 } else if (!strncasecmp(name, "IPV6", 4) &&
11987                                            !in_tunnel)
11988                                         ptype_mapping[i].sw_ptype |=
11989                                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11990                                 else if (!strncasecmp(name, "IPV6", 4) &&
11991                                          in_tunnel)
11992                                         ptype_mapping[i].sw_ptype |=
11993                                             RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11994                                 else if (!strncasecmp(name, "UDP", 3) &&
11995                                          !in_tunnel)
11996                                         ptype_mapping[i].sw_ptype |=
11997                                                 RTE_PTYPE_L4_UDP;
11998                                 else if (!strncasecmp(name, "UDP", 3) &&
11999                                          in_tunnel)
12000                                         ptype_mapping[i].sw_ptype |=
12001                                                 RTE_PTYPE_INNER_L4_UDP;
12002                                 else if (!strncasecmp(name, "TCP", 3) &&
12003                                          !in_tunnel)
12004                                         ptype_mapping[i].sw_ptype |=
12005                                                 RTE_PTYPE_L4_TCP;
12006                                 else if (!strncasecmp(name, "TCP", 3) &&
12007                                          in_tunnel)
12008                                         ptype_mapping[i].sw_ptype |=
12009                                                 RTE_PTYPE_INNER_L4_TCP;
12010                                 else if (!strncasecmp(name, "SCTP", 4) &&
12011                                          !in_tunnel)
12012                                         ptype_mapping[i].sw_ptype |=
12013                                                 RTE_PTYPE_L4_SCTP;
12014                                 else if (!strncasecmp(name, "SCTP", 4) &&
12015                                          in_tunnel)
12016                                         ptype_mapping[i].sw_ptype |=
12017                                                 RTE_PTYPE_INNER_L4_SCTP;
12018                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12019                                           !strncasecmp(name, "ICMPV6", 6)) &&
12020                                          !in_tunnel)
12021                                         ptype_mapping[i].sw_ptype |=
12022                                                 RTE_PTYPE_L4_ICMP;
12023                                 else if ((!strncasecmp(name, "ICMP", 4) ||
12024                                           !strncasecmp(name, "ICMPV6", 6)) &&
12025                                          in_tunnel)
12026                                         ptype_mapping[i].sw_ptype |=
12027                                                 RTE_PTYPE_INNER_L4_ICMP;
12028                                 else if (!strncasecmp(name, "GTPC", 4)) {
12029                                         ptype_mapping[i].sw_ptype |=
12030                                                 RTE_PTYPE_TUNNEL_GTPC;
12031                                         in_tunnel = true;
12032                                 } else if (!strncasecmp(name, "GTPU", 4)) {
12033                                         ptype_mapping[i].sw_ptype |=
12034                                                 RTE_PTYPE_TUNNEL_GTPU;
12035                                         in_tunnel = true;
12036                                 } else if (!strncasecmp(name, "GRENAT", 6)) {
12037                                         ptype_mapping[i].sw_ptype |=
12038                                                 RTE_PTYPE_TUNNEL_GRENAT;
12039                                         in_tunnel = true;
12040                                 } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
12041                                         ptype_mapping[i].sw_ptype |=
12042                                                 RTE_PTYPE_TUNNEL_L2TP;
12043                                         in_tunnel = true;
12044                                 }
12045
12046                                 break;
12047                         }
12048                 }
12049         }
12050
12051         ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12052                                                 ptype_num, 0);
12053         if (ret)
12054                 PMD_DRV_LOG(ERR, "Failed to update mapping table.");
12055
12056         rte_free(ptype_mapping);
12057         rte_free(ptype);
12058         return ret;
12059 }
12060
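/* Entry point used when a DDP package is added or removed: fetch the
 * package's protocol table once, refresh the GTP support flag, then update
 * both the customized pctypes and the ptype mapping from it.
 */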
12061 void
12062 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12063                             uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12064 {
12065         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12066         uint32_t proto_num;
12067         struct rte_pmd_i40e_proto_info *proto;
12068         uint32_t buff_size;
12069         uint32_t i;
12070         int ret;
12071
12072         if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12073             op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12074                 PMD_DRV_LOG(ERR, "Unsupported operation.");
12075                 return;
12076         }
12077
12078         /* get information about protocol number */
12079         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12080                                        (uint8_t *)&proto_num, sizeof(proto_num),
12081                                        RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12082         if (ret) {
12083                 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12084                 return;
12085         }
12086         if (!proto_num) {
12087                 PMD_DRV_LOG(INFO, "No new protocol added");
12088                 return;
12089         }
12090
12091         buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12092         proto = rte_zmalloc("new_proto", buff_size, 0);
12093         if (!proto) {
12094                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12095                 return;
12096         }
12097
12098         /* get information about protocol list */
12099         ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12100                                         (uint8_t *)proto, buff_size,
12101                                         RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12102         if (ret) {
12103                 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12104                 rte_free(proto);
12105                 return;
12106         }
12107
12108         /* Check if GTP is supported. */
12109         for (i = 0; i < proto_num; i++) {
12110                 if (!strncmp(proto[i].name, "GTP", 3)) {
12111                         if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12112                                 pf->gtp_support = true;
12113                         else
12114                                 pf->gtp_support = false;
12115                         break;
12116                 }
12117         }
12118
12119         /* Update customized pctype info */
12120         ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12121                                             proto_num, proto, op);
12122         if (ret)
12123                 PMD_DRV_LOG(INFO, "No pctype is updated.");
12124
12125         /* Update customized ptype info */
12126         ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12127                                            proto_num, proto, op);
12128         if (ret)
12129                 PMD_DRV_LOG(INFO, "No ptype is updated.");
12130
12131         rte_free(proto);
12132 }
12133
12134 /* Create a QinQ cloud filter
12135  *
12136  * The Fortville NIC has limited resources for tunnel filters,
12137  * so we can only reuse existing filters.
12138  *
12139  * In step 1 we define which Field Vector fields can be used for
12140  * filter types.
12141  * As we do not have the inner tag defined as a field,
12142  * we have to define it first, by reusing one of the L1 entries.
12143  *
12144  * In step 2 we replace one of the existing filter types with
12145  * a new one for QinQ.
12146  * As we reuse L1 and replace L2, some of the default filter
12147  * types will disappear, depending on the L1 and L2 entries we reuse.
12148  *
12149  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12150  *
12151  * 1.   Create L1 filter of outer vlan (12b) which will be in use
12152  *              later when we define the cloud filter.
12153  *      a.      Valid_flags.replace_cloud = 0
12154  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
12155  *      c.      New_filter = 0x10
12156  *      d.      TR bit = 0xff (optional, not used here)
12157  *      e.      Buffer - 2 entries:
12158  *              i.      Byte 0 = 8 (outer vlan FV index).
12159  *                      Byte 1 = 0 (rsv)
12160  *                      Byte 2-3 = 0x0fff
12161  *              ii.     Byte 0 = 37 (inner vlan FV index).
12162  *                      Byte 1 = 0 (rsv)
12163  *                      Byte 2-3 = 0x0fff
12164  *
12165  * Step 2:
12166  * 2.   Create cloud filter using two L1 filters entries: stag and
12167  *              new filter(outer vlan+ inner vlan)
12168  *      a.      Valid_flags.replace_cloud = 1
12169  *      b.      Old_filter = 1 (instead of outer IP)
12170  *      c.      New_filter = 0x10
12171  *      d.      Buffer - 2 entries:
12172  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
12173  *                      Byte 1-3 = 0 (rsv)
12174  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12175  *                      Byte 9-11 = 0 (rsv)
12176  */
12177 static int
12178 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12179 {
12180         int ret = -ENOTSUP;
12181         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12182         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12183         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12184
12185         if (pf->support_multi_driver) {
12186                 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12187                 return ret;
12188         }
12189
12190         /* Init */
12191         memset(&filter_replace, 0,
12192                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12193         memset(&filter_replace_buf, 0,
12194                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12195
12196         /* create L1 filter */
12197         filter_replace.old_filter_type =
12198                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12199         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12200         filter_replace.tr_bit = 0;
12201
12202         /* Prepare the buffer, 2 entries */
12203         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12204         filter_replace_buf.data[0] |=
12205                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12206         /* Field Vector 12b mask */
12207         filter_replace_buf.data[2] = 0xff;
12208         filter_replace_buf.data[3] = 0x0f;
12209         filter_replace_buf.data[4] =
12210                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12211         filter_replace_buf.data[4] |=
12212                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12213         /* Field Vector 12b mask */
12214         filter_replace_buf.data[6] = 0xff;
12215         filter_replace_buf.data[7] = 0x0f;
12216         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12217                         &filter_replace_buf);
12218         if (ret != I40E_SUCCESS)
12219                 return ret;
12220         PMD_DRV_LOG(DEBUG, "Global configuration modification: "
12221                     "cloud l1 type is changed from 0x%x to 0x%x",
12222                     filter_replace.old_filter_type,
12223                     filter_replace.new_filter_type);
12224
12225         /* Apply the second L2 cloud filter */
12226         memset(&filter_replace, 0,
12227                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12228         memset(&filter_replace_buf, 0,
12229                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12230
12231         /* create L2 filter, input for L2 filter will be L1 filter  */
12232         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12233         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12234         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12235
12236         /* Prepare the buffer, 2 entries */
12237         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12238         filter_replace_buf.data[0] |=
12239                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12240         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12241         filter_replace_buf.data[4] |=
12242                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12243         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12244                         &filter_replace_buf);
12245         if (!ret) {
12246                 i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
12247                 PMD_DRV_LOG(DEBUG, "Global configuration modification: "
12248                             "cloud filter type is changed from 0x%x to 0x%x",
12249                             filter_replace.old_filter_type,
12250                             filter_replace.new_filter_type);
12251         }
12252         return ret;
12253 }
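/* Note: the replaced L1/L2 filter layout is a device-global change (see
 * the log messages above), so this helper is expected to run only once,
 * lazily, when the first QinQ tunnel filter is requested.
 */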
12254
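/* Deep-copy a flow API RSS action into the driver's private storage so the
 * key and queue arrays remain valid after the caller's buffers are gone;
 * fails if either array exceeds the preallocated space.
 */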
12255 int
12256 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
12257                    const struct rte_flow_action_rss *in)
12258 {
12259         if (in->key_len > RTE_DIM(out->key) ||
12260             in->queue_num > RTE_DIM(out->queue))
12261                 return -EINVAL;
12262         out->conf = (struct rte_flow_action_rss){
12263                 .func = in->func,
12264                 .level = in->level,
12265                 .types = in->types,
12266                 .key_len = in->key_len,
12267                 .queue_num = in->queue_num,
12268                 .key = memcpy(out->key, in->key, in->key_len),
12269                 .queue = memcpy(out->queue, in->queue,
12270                                 sizeof(*in->queue) * in->queue_num),
12271         };
12272         return 0;
12273 }
12274
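/* Return non-zero when two RSS action configurations match field by field,
 * including the key bytes and the queue list contents.
 */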
12275 int
12276 i40e_action_rss_same(const struct rte_flow_action_rss *comp,
12277                      const struct rte_flow_action_rss *with)
12278 {
12279         return (comp->func == with->func &&
12280                 comp->level == with->level &&
12281                 comp->types == with->types &&
12282                 comp->key_len == with->key_len &&
12283                 comp->queue_num == with->queue_num &&
12284                 !memcmp(comp->key, with->key, with->key_len) &&
12285                 !memcmp(comp->queue, with->queue,
12286                         sizeof(*with->queue) * with->queue_num));
12287 }
12288
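/* Program (add == true) or remove an rte_flow RSS configuration. On add,
 * the redirection table is filled with the requested queues and the hash
 * key is set, falling back to a built-in default key when none is given;
 * only one such RSS rule is tracked at a time. On remove, RSS is disabled
 * only when the request matches the stored configuration.
 */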
12289 int
12290 i40e_config_rss_filter(struct i40e_pf *pf,
12291                 struct i40e_rte_flow_rss_conf *conf, bool add)
12292 {
12293         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12294         uint32_t i, lut = 0;
12295         uint16_t j, num;
12296         struct rte_eth_rss_conf rss_conf = {
12297                 .rss_key = conf->conf.key_len ?
12298                         (void *)(uintptr_t)conf->conf.key : NULL,
12299                 .rss_key_len = conf->conf.key_len,
12300                 .rss_hf = conf->conf.types,
12301         };
12302         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12303
12304         if (!add) {
12305                 if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
12306                         i40e_pf_disable_rss(pf);
12307                         memset(rss_info, 0,
12308                                 sizeof(struct i40e_rte_flow_rss_conf));
12309                         return 0;
12310                 }
12311                 return -EINVAL;
12312         }
12313
12314         if (rss_info->conf.queue_num)
12315                 return -EINVAL;
12316
12317         /* If both VMDQ and RSS are enabled, not all of the PF queues are
12318          * configured, so calculate the number of PF queues actually in use.
12319          */
12320         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12321                 num = i40e_pf_calc_configured_queues_num(pf);
12322         else
12323                 num = pf->dev_data->nb_rx_queues;
12324
12325         num = RTE_MIN(num, conf->conf.queue_num);
12326         PMD_DRV_LOG(INFO, "At most %u contiguous PF queues are configured",
12327                         num);
12328
12329         if (num == 0) {
12330                 PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
12331                 return -ENOTSUP;
12332         }
12333
12334         /* Fill in redirection table */
12335         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12336                 if (j == num)
12337                         j = 0;
12338                 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
12339                         hw->func_caps.rss_table_entry_width) - 1));
12340                 if ((i & 3) == 3)
12341                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12342         }
12343
12344         if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
12345                 i40e_pf_disable_rss(pf);
12346                 return 0;
12347         }
12348         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
12349                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12350                 /* Random default keys */
12351                 static uint32_t rss_key_default[] = {0x6b793944,
12352                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12353                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12354                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12355
12356                 rss_conf.rss_key = (uint8_t *)rss_key_default;
12357                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12358                                                         sizeof(uint32_t);
12359         }
12360
12361         i40e_hw_rss_hash_set(pf, &rss_conf);
12362
12363         if (i40e_rss_conf_init(rss_info, &conf->conf))
12364                 return -EINVAL;
12365
12366         return 0;
12367 }
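/* Usage sketch (hedged): this path is normally reached through the flow
 * API, e.g. a testpmd rule along the lines of:
 *
 *     flow create 0 ingress pattern end \
 *         actions rss types ipv4-udp end queues 0 1 2 3 end / end
 *
 * The exact testpmd syntax may differ between releases.
 */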
12368
12369 RTE_INIT(i40e_init_log);
12370 static void
12371 i40e_init_log(void)
12372 {
12373         i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
12374         if (i40e_logtype_init >= 0)
12375                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
12376         i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
12377         if (i40e_logtype_driver >= 0)
12378                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
12379 }
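/* The two log types registered above default to NOTICE. More verbose
 * driver output can be requested at runtime through the EAL log-level
 * option, e.g. something like --log-level=pmd.net.i40e.driver,8; the
 * exact option syntax depends on the EAL release.
 */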
12380
12381 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12382                               QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12383                               ETH_I40E_SUPPORT_MULTI_DRIVER "=1");