net/i40e: fix division by 0
drivers/net/i40e/i40e_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40 #include <inttypes.h>
41 #include <assert.h>
42
43 #include <rte_eal.h>
44 #include <rte_string_fns.h>
45 #include <rte_pci.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_ethdev_pci.h>
49 #include <rte_memzone.h>
50 #include <rte_malloc.h>
51 #include <rte_memcpy.h>
52 #include <rte_alarm.h>
53 #include <rte_dev.h>
54 #include <rte_eth_ctrl.h>
55 #include <rte_tailq.h>
56 #include <rte_hash_crc.h>
57
58 #include "i40e_logs.h"
59 #include "base/i40e_prototype.h"
60 #include "base/i40e_adminq_cmd.h"
61 #include "base/i40e_type.h"
62 #include "base/i40e_register.h"
63 #include "base/i40e_dcb.h"
64 #include "i40e_ethdev.h"
65 #include "i40e_rxtx.h"
66 #include "i40e_pf.h"
67 #include "i40e_regs.h"
68
69 #define ETH_I40E_FLOATING_VEB_ARG       "enable_floating_veb"
70 #define ETH_I40E_FLOATING_VEB_LIST_ARG  "floating_veb_list"
71
72 #define I40E_CLEAR_PXE_WAIT_MS     200
73
74 /* Maximum number of capability elements */
75 #define I40E_MAX_CAP_ELE_NUM       128
76
77 /* Wait count and interval */
78 #define I40E_CHK_Q_ENA_COUNT       1000
79 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
80
81 /* Maximum number of VSIs */
82 #define I40E_MAX_NUM_VSIS          (384UL)
83
84 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
85
86 /* Flow control default timer */
87 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
88
89 /* Flow control default high water */
90 #define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
91
92 /* Flow control default low water */
93 #define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)
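/* The high/low water defaults above are kept in kilobyte units: the raw byte
 * counts (0x1C40 and 0x1A40) are pre-divided by 1024, and I40E_KILOSHIFT
 * below is used to shift kilobyte values back to bytes where they are
 * consumed.
 */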
94
95 /* Flow control enable fwd bit */
96 #define I40E_PRTMAC_FWD_CTRL   0x00000001
97
98 /* Receive Packet Buffer size */
99 #define I40E_RXPBSIZE (968 * 1024)
100
101 /* Kilobytes shift */
102 #define I40E_KILOSHIFT 10
103
104 /* Receive Average Packet Size in Bytes */
105 #define I40E_PACKET_AVERAGE_SIZE 128
106
107 /* Mask of PF interrupt causes */
108 #define I40E_PFINT_ICR0_ENA_MASK ( \
109                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
110                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
111                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
112                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
113                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
114                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
115                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
116                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
117                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
118
119 #define I40E_FLOW_TYPES ( \
120         (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
121         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
122         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
123         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
124         (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
125         (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
126         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
127         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
128         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
129         (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
130         (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
131
132 /* Additional timesync values. */
133 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
134 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
135 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
136 #define I40E_PRTTSYN_TSYNENA     0x80000000
137 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
138 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
139
140 #define I40E_MAX_PERCENT            100
141 #define I40E_DEFAULT_DCB_APP_NUM    1
142 #define I40E_DEFAULT_DCB_APP_PRIO   3
143
144 /**
145  * Below are values for writing un-exposed registers, as suggested
146  * by silicon experts.
147  */
148 /* Destination MAC address */
149 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
150 /* Source MAC address */
151 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
152 /* Outer (S-Tag) VLAN tag in the outer L2 header */
153 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
154 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
155 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
156 /* Single VLAN tag in the inner L2 header */
157 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
158 /* Source IPv4 address */
159 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
160 /* Destination IPv4 address */
161 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
162 /* Source IPv4 address for X722 */
163 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
164 /* Destination IPv4 address for X722 */
165 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
166 /* IPv4 Protocol for X722 */
167 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
168 /* IPv4 Time to Live for X722 */
169 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
170 /* IPv4 Type of Service (TOS) */
171 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
172 /* IPv4 Protocol */
173 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
174 /* IPv4 Time to Live */
175 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
176 /* Source IPv6 address */
177 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
178 /* Destination IPv6 address */
179 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
180 /* IPv6 Traffic Class (TC) */
181 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
182 /* IPv6 Next Header */
183 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
184 /* IPv6 Hop Limit */
185 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
186 /* Source L4 port */
187 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
188 /* Destination L4 port */
189 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
190 /* SCTP verification tag */
191 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
192 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
193 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
194 /* Source port of tunneling UDP */
195 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
196 /* Destination port of tunneling UDP */
197 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
198 /* UDP Tunneling ID, NVGRE/GRE key */
199 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
200 /* Last ether type */
201 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
202 /* Tunneling outer destination IPv4 address */
203 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
204 /* Tunneling outer destination IPv6 address */
205 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
206 /* 1st word of flex payload */
207 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
208 /* 2nd word of flex payload */
209 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
210 /* 3rd word of flex payload */
211 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
212 /* 4th word of flex payload */
213 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
214 /* 5th word of flex payload */
215 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
216 /* 6th word of flex payload */
217 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
218 /* 7th word of flex payload */
219 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
220 /* 8th word of flex payload */
221 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
222 /* all 8 words flex payload */
223 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
224 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
225
226 #define I40E_TRANSLATE_INSET 0
227 #define I40E_TRANSLATE_REG   1
228
229 #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
230 #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
231 #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
232 #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
233 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
234 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
235
236 /* PCI offset for querying capability */
237 #define PCI_DEV_CAP_REG            0xA4
238 /* PCI offset for enabling/disabling Extended Tag */
239 #define PCI_DEV_CTRL_REG           0xA8
240 /* Bit mask of Extended Tag capability */
241 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
242 /* Bit shift of Extended Tag enable/disable */
243 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
244 /* Bit mask of Extended Tag enable/disable */
245 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
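/* These PCIe config-space offsets and masks are used later in this file:
 * the capability register is read to check for Extended Tag support and,
 * if present, the enable bit is set in the device control register.
 */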
246
247 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
248 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
249 static int i40e_dev_configure(struct rte_eth_dev *dev);
250 static int i40e_dev_start(struct rte_eth_dev *dev);
251 static void i40e_dev_stop(struct rte_eth_dev *dev);
252 static void i40e_dev_close(struct rte_eth_dev *dev);
253 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
254 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
255 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
256 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
257 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
258 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
259 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
260                                struct rte_eth_stats *stats);
261 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
262                                struct rte_eth_xstat *xstats, unsigned n);
263 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
264                                      struct rte_eth_xstat_name *xstats_names,
265                                      unsigned limit);
266 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
267 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
268                                             uint16_t queue_id,
269                                             uint8_t stat_idx,
270                                             uint8_t is_rx);
271 static int i40e_fw_version_get(struct rte_eth_dev *dev,
272                                 char *fw_version, size_t fw_size);
273 static void i40e_dev_info_get(struct rte_eth_dev *dev,
274                               struct rte_eth_dev_info *dev_info);
275 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
276                                 uint16_t vlan_id,
277                                 int on);
278 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
279                               enum rte_vlan_type vlan_type,
280                               uint16_t tpid);
281 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
282 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
283                                       uint16_t queue,
284                                       int on);
285 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
286 static int i40e_dev_led_on(struct rte_eth_dev *dev);
287 static int i40e_dev_led_off(struct rte_eth_dev *dev);
288 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
289                               struct rte_eth_fc_conf *fc_conf);
290 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
291                               struct rte_eth_fc_conf *fc_conf);
292 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
293                                        struct rte_eth_pfc_conf *pfc_conf);
294 static int i40e_macaddr_add(struct rte_eth_dev *dev,
295                             struct ether_addr *mac_addr,
296                             uint32_t index,
297                             uint32_t pool);
298 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
299 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
300                                     struct rte_eth_rss_reta_entry64 *reta_conf,
301                                     uint16_t reta_size);
302 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
303                                    struct rte_eth_rss_reta_entry64 *reta_conf,
304                                    uint16_t reta_size);
305
306 static int i40e_get_cap(struct i40e_hw *hw);
307 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
308 static int i40e_pf_setup(struct i40e_pf *pf);
309 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
310 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
311 static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
312 static int i40e_dcb_setup(struct rte_eth_dev *dev);
313 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
314                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
315 static void i40e_stat_update_48(struct i40e_hw *hw,
316                                uint32_t hireg,
317                                uint32_t loreg,
318                                bool offset_loaded,
319                                uint64_t *offset,
320                                uint64_t *stat);
321 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
322 static void i40e_dev_interrupt_handler(void *param);
323 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
324                                 uint32_t base, uint32_t num);
325 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
326 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
327                         uint32_t base);
328 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
329                         uint16_t num);
330 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
331 static int i40e_veb_release(struct i40e_veb *veb);
332 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
333                                                 struct i40e_vsi *vsi);
334 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
335 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
336 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
337                                              struct i40e_macvlan_filter *mv_f,
338                                              int num,
339                                              uint16_t vlan);
340 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
341 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
342                                     struct rte_eth_rss_conf *rss_conf);
343 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
344                                       struct rte_eth_rss_conf *rss_conf);
345 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
346                                         struct rte_eth_udp_tunnel *udp_tunnel);
347 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
348                                         struct rte_eth_udp_tunnel *udp_tunnel);
349 static void i40e_filter_input_set_init(struct i40e_pf *pf);
350 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
351                                 enum rte_filter_op filter_op,
352                                 void *arg);
353 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
354                                 enum rte_filter_type filter_type,
355                                 enum rte_filter_op filter_op,
356                                 void *arg);
357 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
358                                   struct rte_eth_dcb_info *dcb_info);
359 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
360 static void i40e_configure_registers(struct i40e_hw *hw);
361 static void i40e_hw_init(struct rte_eth_dev *dev);
362 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
363 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
364                         struct rte_eth_mirror_conf *mirror_conf,
365                         uint8_t sw_id, uint8_t on);
366 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
367
368 static int i40e_timesync_enable(struct rte_eth_dev *dev);
369 static int i40e_timesync_disable(struct rte_eth_dev *dev);
370 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
371                                            struct timespec *timestamp,
372                                            uint32_t flags);
373 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
374                                            struct timespec *timestamp);
375 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
376
377 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
378
379 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
380                                    struct timespec *timestamp);
381 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
382                                     const struct timespec *timestamp);
383
384 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
385                                          uint16_t queue_id);
386 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
387                                           uint16_t queue_id);
388
389 static int i40e_get_regs(struct rte_eth_dev *dev,
390                          struct rte_dev_reg_info *regs);
391
392 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
393
394 static int i40e_get_eeprom(struct rte_eth_dev *dev,
395                            struct rte_dev_eeprom_info *eeprom);
396
397 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
398                                       struct ether_addr *mac_addr);
399
400 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
401
402 static int i40e_ethertype_filter_convert(
403         const struct rte_eth_ethertype_filter *input,
404         struct i40e_ethertype_filter *filter);
405 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
406                                    struct i40e_ethertype_filter *filter);
407
408 static int i40e_tunnel_filter_convert(
409         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
410         struct i40e_tunnel_filter *tunnel_filter);
411 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
412                                 struct i40e_tunnel_filter *tunnel_filter);
413 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
414
415 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
416 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
417 static void i40e_filter_restore(struct i40e_pf *pf);
418 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
419
420 int i40e_logtype_init;
421 int i40e_logtype_driver;
422
423 static const struct rte_pci_id pci_id_i40e_map[] = {
424         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
425         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
426         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
427         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
428         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
429         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
430         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
431         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
432         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
433         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
434         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
435         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
436         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
437         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
438         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
439         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
440         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
441         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
442         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
443         { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
444         { .vendor_id = 0, /* sentinel */ },
445 };
446
447 static const struct eth_dev_ops i40e_eth_dev_ops = {
448         .dev_configure                = i40e_dev_configure,
449         .dev_start                    = i40e_dev_start,
450         .dev_stop                     = i40e_dev_stop,
451         .dev_close                    = i40e_dev_close,
452         .promiscuous_enable           = i40e_dev_promiscuous_enable,
453         .promiscuous_disable          = i40e_dev_promiscuous_disable,
454         .allmulticast_enable          = i40e_dev_allmulticast_enable,
455         .allmulticast_disable         = i40e_dev_allmulticast_disable,
456         .dev_set_link_up              = i40e_dev_set_link_up,
457         .dev_set_link_down            = i40e_dev_set_link_down,
458         .link_update                  = i40e_dev_link_update,
459         .stats_get                    = i40e_dev_stats_get,
460         .xstats_get                   = i40e_dev_xstats_get,
461         .xstats_get_names             = i40e_dev_xstats_get_names,
462         .stats_reset                  = i40e_dev_stats_reset,
463         .xstats_reset                 = i40e_dev_stats_reset,
464         .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
465         .fw_version_get               = i40e_fw_version_get,
466         .dev_infos_get                = i40e_dev_info_get,
467         .dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
468         .vlan_filter_set              = i40e_vlan_filter_set,
469         .vlan_tpid_set                = i40e_vlan_tpid_set,
470         .vlan_offload_set             = i40e_vlan_offload_set,
471         .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
472         .vlan_pvid_set                = i40e_vlan_pvid_set,
473         .rx_queue_start               = i40e_dev_rx_queue_start,
474         .rx_queue_stop                = i40e_dev_rx_queue_stop,
475         .tx_queue_start               = i40e_dev_tx_queue_start,
476         .tx_queue_stop                = i40e_dev_tx_queue_stop,
477         .rx_queue_setup               = i40e_dev_rx_queue_setup,
478         .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
479         .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
480         .rx_queue_release             = i40e_dev_rx_queue_release,
481         .rx_queue_count               = i40e_dev_rx_queue_count,
482         .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
483         .rx_descriptor_status         = i40e_dev_rx_descriptor_status,
484         .tx_descriptor_status         = i40e_dev_tx_descriptor_status,
485         .tx_queue_setup               = i40e_dev_tx_queue_setup,
486         .tx_queue_release             = i40e_dev_tx_queue_release,
487         .dev_led_on                   = i40e_dev_led_on,
488         .dev_led_off                  = i40e_dev_led_off,
489         .flow_ctrl_get                = i40e_flow_ctrl_get,
490         .flow_ctrl_set                = i40e_flow_ctrl_set,
491         .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
492         .mac_addr_add                 = i40e_macaddr_add,
493         .mac_addr_remove              = i40e_macaddr_remove,
494         .reta_update                  = i40e_dev_rss_reta_update,
495         .reta_query                   = i40e_dev_rss_reta_query,
496         .rss_hash_update              = i40e_dev_rss_hash_update,
497         .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
498         .udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
499         .udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
500         .filter_ctrl                  = i40e_dev_filter_ctrl,
501         .rxq_info_get                 = i40e_rxq_info_get,
502         .txq_info_get                 = i40e_txq_info_get,
503         .mirror_rule_set              = i40e_mirror_rule_set,
504         .mirror_rule_reset            = i40e_mirror_rule_reset,
505         .timesync_enable              = i40e_timesync_enable,
506         .timesync_disable             = i40e_timesync_disable,
507         .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
508         .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
509         .get_dcb_info                 = i40e_dev_get_dcb_info,
510         .timesync_adjust_time         = i40e_timesync_adjust_time,
511         .timesync_read_time           = i40e_timesync_read_time,
512         .timesync_write_time          = i40e_timesync_write_time,
513         .get_reg                      = i40e_get_regs,
514         .get_eeprom_length            = i40e_get_eeprom_length,
515         .get_eeprom                   = i40e_get_eeprom,
516         .mac_addr_set                 = i40e_set_default_mac_addr,
517         .mtu_set                      = i40e_dev_mtu_set,
518 };
519
520 /* Store statistics names and their offsets in the stats structure */
521 struct rte_i40e_xstats_name_off {
522         char name[RTE_ETH_XSTATS_NAME_SIZE];
523         unsigned offset;
524 };
525
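/* Each entry below pairs an xstats name with the byte offset of the matching
 * counter inside the statistics structure, so the xstats_get path can read a
 * counter generically, roughly like this (a sketch, not the literal code):
 *
 *     uint64_t value = *(uint64_t *)((char *)stats +
 *                                    rte_i40e_stats_strings[i].offset);
 */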
526 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
527         {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
528         {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
529         {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
530         {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
531         {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
532                 rx_unknown_protocol)},
533         {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
534         {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
535         {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
536         {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
537 };
538
539 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
540                 sizeof(rte_i40e_stats_strings[0]))
541
542 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
543         {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
544                 tx_dropped_link_down)},
545         {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
546         {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
547                 illegal_bytes)},
548         {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
549         {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
550                 mac_local_faults)},
551         {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
552                 mac_remote_faults)},
553         {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
554                 rx_length_errors)},
555         {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
556         {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
557         {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
558         {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
559         {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
560         {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
561                 rx_size_127)},
562         {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
563                 rx_size_255)},
564         {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
565                 rx_size_511)},
566         {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
567                 rx_size_1023)},
568         {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
569                 rx_size_1522)},
570         {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
571                 rx_size_big)},
572         {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
573                 rx_undersize)},
574         {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
575                 rx_oversize)},
576         {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
577                 mac_short_packet_dropped)},
578         {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
579                 rx_fragments)},
580         {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
581         {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
582         {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
583                 tx_size_127)},
584         {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
585                 tx_size_255)},
586         {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
587                 tx_size_511)},
588         {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
589                 tx_size_1023)},
590         {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
591                 tx_size_1522)},
592         {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
593                 tx_size_big)},
594         {"rx_flow_director_atr_match_packets",
595                 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
596         {"rx_flow_director_sb_match_packets",
597                 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
598         {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
599                 tx_lpi_status)},
600         {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
601                 rx_lpi_status)},
602         {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
603                 tx_lpi_count)},
604         {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
605                 rx_lpi_count)},
606 };
607
608 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
609                 sizeof(rte_i40e_hw_port_strings[0]))
610
611 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
612         {"xon_packets", offsetof(struct i40e_hw_port_stats,
613                 priority_xon_rx)},
614         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
615                 priority_xoff_rx)},
616 };
617
618 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
619                 sizeof(rte_i40e_rxq_prio_strings[0]))
620
621 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
622         {"xon_packets", offsetof(struct i40e_hw_port_stats,
623                 priority_xon_tx)},
624         {"xoff_packets", offsetof(struct i40e_hw_port_stats,
625                 priority_xoff_tx)},
626         {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
627                 priority_xon_2_xoff)},
628 };
629
630 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
631                 sizeof(rte_i40e_txq_prio_strings[0]))
632
633 static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
634         struct rte_pci_device *pci_dev)
635 {
636         return rte_eth_dev_pci_generic_probe(pci_dev,
637                 sizeof(struct i40e_adapter), eth_i40e_dev_init);
638 }
639
640 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
641 {
642         return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
643 }
644
645 static struct rte_pci_driver rte_i40e_pmd = {
646         .id_table = pci_id_i40e_map,
647         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
648         .probe = eth_i40e_pci_probe,
649         .remove = eth_i40e_pci_remove,
650 };
651
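/* struct rte_eth_link fits in a single 64-bit word here, so the two helpers
 * below copy it with one atomic compare-and-set; readers never observe a
 * partially updated link status.
 */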
652 static inline int
653 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
654                                      struct rte_eth_link *link)
655 {
656         struct rte_eth_link *dst = link;
657         struct rte_eth_link *src = &(dev->data->dev_link);
658
659         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
660                                         *(uint64_t *)src) == 0)
661                 return -1;
662
663         return 0;
664 }
665
666 static inline int
667 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
668                                       struct rte_eth_link *link)
669 {
670         struct rte_eth_link *dst = &(dev->data->dev_link);
671         struct rte_eth_link *src = link;
672
673         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
674                                         *(uint64_t *)src) == 0)
675                 return -1;
676
677         return 0;
678 }
679
680 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
681 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
682 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
683
684 #ifndef I40E_GLQF_ORT
685 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
686 #endif
687 #ifndef I40E_GLQF_PIT
688 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
689 #endif
690 #ifndef I40E_GLQF_L3_MAP
691 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
692 #endif
693
694 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
695 {
696         /*
697          * Initialize registers for flexible payload; these should be set by NVM.
698          * This should be removed from the code once the NVM issue is fixed.
699          */
700         I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
701         I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
702         I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
703         I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
704         I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
705         I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
706         I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
707         I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
708         I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
709         I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
710         I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
711         I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
712
713         /* Initialize registers for parsing packet type of QinQ */
714         I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
715         I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
716 }
717
718 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
719
720 /*
721  * Add an ethertype filter to drop all flow control frames transmitted
722  * from VSIs.
723  */
724 static void
725 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
726 {
727         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
728         uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
729                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
730                         I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
731         int ret;
732
733         ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
734                                 I40E_FLOW_CONTROL_ETHERTYPE, flags,
735                                 pf->main_vsi_seid, 0,
736                                 TRUE, NULL, NULL);
737         if (ret)
738                 PMD_INIT_LOG(ERR,
739                         "Failed to add filter to drop flow control frames from VSIs.");
740 }
741
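/* Parse the "floating_veb_list" devarg. The value is a list of VF indices
 * separated by ';', where '-' marks an inclusive range, e.g. (an assumed
 * example) floating_veb_list=0;3-5;7. Every listed VF is attached to the
 * floating VEB; the remaining VFs stay on the legacy VEB.
 */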
742 static int
743 floating_veb_list_handler(__rte_unused const char *key,
744                           const char *floating_veb_value,
745                           void *opaque)
746 {
747         int idx = 0;
748         unsigned int count = 0;
749         char *end = NULL;
750         int min, max;
751         bool *vf_floating_veb = opaque;
752
753         while (isblank(*floating_veb_value))
754                 floating_veb_value++;
755
756         /* Reset floating VEB configuration for VFs */
757         for (idx = 0; idx < I40E_MAX_VF; idx++)
758                 vf_floating_veb[idx] = false;
759
760         min = I40E_MAX_VF;
761         do {
762                 while (isblank(*floating_veb_value))
763                         floating_veb_value++;
764                 if (*floating_veb_value == '\0')
765                         return -1;
766                 errno = 0;
767                 idx = strtoul(floating_veb_value, &end, 10);
768                 if (errno || end == NULL)
769                         return -1;
770                 while (isblank(*end))
771                         end++;
772                 if (*end == '-') {
773                         min = idx;
774                 } else if ((*end == ';') || (*end == '\0')) {
775                         max = idx;
776                         if (min == I40E_MAX_VF)
777                                 min = idx;
778                         if (max >= I40E_MAX_VF)
779                                 max = I40E_MAX_VF - 1;
780                         for (idx = min; idx <= max; idx++) {
781                                 vf_floating_veb[idx] = true;
782                                 count++;
783                         }
784                         min = I40E_MAX_VF;
785                 } else {
786                         return -1;
787                 }
788                 floating_veb_value = end + 1;
789         } while (*end != '\0');
790
791         if (count == 0)
792                 return -1;
793
794         return 0;
795 }
796
797 static void
798 config_vf_floating_veb(struct rte_devargs *devargs,
799                        uint16_t floating_veb,
800                        bool *vf_floating_veb)
801 {
802         struct rte_kvargs *kvlist;
803         int i;
804         const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
805
806         if (!floating_veb)
807                 return;
808         /* All the VFs attach to the floating VEB by default
809          * when the floating VEB is enabled.
810          */
811         for (i = 0; i < I40E_MAX_VF; i++)
812                 vf_floating_veb[i] = true;
813
814         if (devargs == NULL)
815                 return;
816
817         kvlist = rte_kvargs_parse(devargs->args, NULL);
818         if (kvlist == NULL)
819                 return;
820
821         if (!rte_kvargs_count(kvlist, floating_veb_list)) {
822                 rte_kvargs_free(kvlist);
823                 return;
824         }
825         /* When the floating_veb_list parameter exists, all the VFs
826          * first attach to the legacy VEB, and are then moved to the
827          * floating VEB according to the floating_veb_list.
828          */
829         if (rte_kvargs_process(kvlist, floating_veb_list,
830                                floating_veb_list_handler,
831                                vf_floating_veb) < 0) {
832                 rte_kvargs_free(kvlist);
833                 return;
834         }
835         rte_kvargs_free(kvlist);
836 }
837
838 static int
839 i40e_check_floating_handler(__rte_unused const char *key,
840                             const char *value,
841                             __rte_unused void *opaque)
842 {
843         if (strcmp(value, "1"))
844                 return -1;
845
846         return 0;
847 }
848
849 static int
850 is_floating_veb_supported(struct rte_devargs *devargs)
851 {
852         struct rte_kvargs *kvlist;
853         const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
854
855         if (devargs == NULL)
856                 return 0;
857
858         kvlist = rte_kvargs_parse(devargs->args, NULL);
859         if (kvlist == NULL)
860                 return 0;
861
862         if (!rte_kvargs_count(kvlist, floating_veb_key)) {
863                 rte_kvargs_free(kvlist);
864                 return 0;
865         }
866         /* Floating VEB is enabled when there's key-value:
867          * enable_floating_veb=1
868          */
869         if (rte_kvargs_process(kvlist, floating_veb_key,
870                                i40e_check_floating_handler, NULL) < 0) {
871                 rte_kvargs_free(kvlist);
872                 return 0;
873         }
874         rte_kvargs_free(kvlist);
875
876         return 1;
877 }
878
879 static void
880 config_floating_veb(struct rte_eth_dev *dev)
881 {
882         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
883         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
884         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
885
886         memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
887
888         if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
889                 pf->floating_veb =
890                         is_floating_veb_supported(pci_dev->device.devargs);
891                 config_vf_floating_veb(pci_dev->device.devargs,
892                                        pf->floating_veb,
893                                        pf->floating_veb_list);
894         } else {
895                 pf->floating_veb = false;
896         }
897 }
898
899 #define I40E_L2_TAGS_S_TAG_SHIFT 1
900 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
901
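/* The ethertype, tunnel and flow director rule sets initialized below share
 * one layout: a TAILQ holding the software copy of each rule, an rte_hash
 * keyed on the filter input for lookup and duplicate detection, and a
 * hash_map array that maps a hash position back to its rule for O(1)
 * retrieval.
 */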
902 static int
903 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
904 {
905         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
906         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
907         char ethertype_hash_name[RTE_HASH_NAMESIZE];
908         int ret;
909
910         struct rte_hash_parameters ethertype_hash_params = {
911                 .name = ethertype_hash_name,
912                 .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
913                 .key_len = sizeof(struct i40e_ethertype_filter_input),
914                 .hash_func = rte_hash_crc,
915                 .hash_func_init_val = 0,
916                 .socket_id = rte_socket_id(),
917         };
918
919         /* Initialize ethertype filter rule list and hash */
920         TAILQ_INIT(&ethertype_rule->ethertype_list);
921         snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
922                  "ethertype_%s", dev->device->name);
923         ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
924         if (!ethertype_rule->hash_table) {
925                 PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
926                 return -EINVAL;
927         }
928         ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
929                                        sizeof(struct i40e_ethertype_filter *) *
930                                        I40E_MAX_ETHERTYPE_FILTER_NUM,
931                                        0);
932         if (!ethertype_rule->hash_map) {
933                 PMD_INIT_LOG(ERR,
934                              "Failed to allocate memory for ethertype hash map!");
935                 ret = -ENOMEM;
936                 goto err_ethertype_hash_map_alloc;
937         }
938
939         return 0;
940
941 err_ethertype_hash_map_alloc:
942         rte_hash_free(ethertype_rule->hash_table);
943
944         return ret;
945 }
946
947 static int
948 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
949 {
950         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
951         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
952         char tunnel_hash_name[RTE_HASH_NAMESIZE];
953         int ret;
954
955         struct rte_hash_parameters tunnel_hash_params = {
956                 .name = tunnel_hash_name,
957                 .entries = I40E_MAX_TUNNEL_FILTER_NUM,
958                 .key_len = sizeof(struct i40e_tunnel_filter_input),
959                 .hash_func = rte_hash_crc,
960                 .hash_func_init_val = 0,
961                 .socket_id = rte_socket_id(),
962         };
963
964         /* Initialize tunnel filter rule list and hash */
965         TAILQ_INIT(&tunnel_rule->tunnel_list);
966         snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
967                  "tunnel_%s", dev->device->name);
968         tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
969         if (!tunnel_rule->hash_table) {
970                 PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
971                 return -EINVAL;
972         }
973         tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
974                                     sizeof(struct i40e_tunnel_filter *) *
975                                     I40E_MAX_TUNNEL_FILTER_NUM,
976                                     0);
977         if (!tunnel_rule->hash_map) {
978                 PMD_INIT_LOG(ERR,
979                              "Failed to allocate memory for tunnel hash map!");
980                 ret = -ENOMEM;
981                 goto err_tunnel_hash_map_alloc;
982         }
983
984         return 0;
985
986 err_tunnel_hash_map_alloc:
987         rte_hash_free(tunnel_rule->hash_table);
988
989         return ret;
990 }
991
992 static int
993 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
994 {
995         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
996         struct i40e_fdir_info *fdir_info = &pf->fdir;
997         char fdir_hash_name[RTE_HASH_NAMESIZE];
998         int ret;
999
1000         struct rte_hash_parameters fdir_hash_params = {
1001                 .name = fdir_hash_name,
1002                 .entries = I40E_MAX_FDIR_FILTER_NUM,
1003                 .key_len = sizeof(struct rte_eth_fdir_input),
1004                 .hash_func = rte_hash_crc,
1005                 .hash_func_init_val = 0,
1006                 .socket_id = rte_socket_id(),
1007         };
1008
1009         /* Initialize flow director filter rule list and hash */
1010         TAILQ_INIT(&fdir_info->fdir_list);
1011         snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1012                  "fdir_%s", dev->device->name);
1013         fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1014         if (!fdir_info->hash_table) {
1015                 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1016                 return -EINVAL;
1017         }
1018         fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1019                                           sizeof(struct i40e_fdir_filter *) *
1020                                           I40E_MAX_FDIR_FILTER_NUM,
1021                                           0);
1022         if (!fdir_info->hash_map) {
1023                 PMD_INIT_LOG(ERR,
1024                              "Failed to allocate memory for fdir hash map!");
1025                 ret = -ENOMEM;
1026                 goto err_fdir_hash_map_alloc;
1027         }
1028         return 0;
1029
1030 err_fdir_hash_map_alloc:
1031         rte_hash_free(fdir_info->hash_table);
1032
1033         return ret;
1034 }
1035
1036 static int
1037 eth_i40e_dev_init(struct rte_eth_dev *dev)
1038 {
1039         struct rte_pci_device *pci_dev;
1040         struct rte_intr_handle *intr_handle;
1041         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1042         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1043         struct i40e_vsi *vsi;
1044         int ret;
1045         uint32_t len;
1046         uint8_t aq_fail = 0;
1047
1048         PMD_INIT_FUNC_TRACE();
1049
1050         dev->dev_ops = &i40e_eth_dev_ops;
1051         dev->rx_pkt_burst = i40e_recv_pkts;
1052         dev->tx_pkt_burst = i40e_xmit_pkts;
1053         dev->tx_pkt_prepare = i40e_prep_pkts;
1054
1055         /* For secondary processes, we don't initialise any further, as the
1056          * primary process has already done this work. Only check whether we
1057          * need different Rx and Tx functions. */
1058         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1059                 i40e_set_rx_function(dev);
1060                 i40e_set_tx_function(dev);
1061                 return 0;
1062         }
1063         i40e_set_default_ptype_table(dev);
1064         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1065         intr_handle = &pci_dev->intr_handle;
1066
1067         rte_eth_copy_pci_info(dev, pci_dev);
1068         dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1069
1070         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1071         pf->adapter->eth_dev = dev;
1072         pf->dev_data = dev->data;
1073
1074         hw->back = I40E_PF_TO_ADAPTER(pf);
1075         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1076         if (!hw->hw_addr) {
1077                 PMD_INIT_LOG(ERR,
1078                         "Hardware is not available, as address is NULL");
1079                 return -ENODEV;
1080         }
1081
1082         hw->vendor_id = pci_dev->id.vendor_id;
1083         hw->device_id = pci_dev->id.device_id;
1084         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1085         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1086         hw->bus.device = pci_dev->addr.devid;
1087         hw->bus.func = pci_dev->addr.function;
1088         hw->adapter_stopped = 0;
1089
1090         /* Make sure all is clean before doing PF reset */
1091         i40e_clear_hw(hw);
1092
1093         /* Initialize the hardware */
1094         i40e_hw_init(dev);
1095
1096         /* Reset here to make sure all is clean for each PF */
1097         ret = i40e_pf_reset(hw);
1098         if (ret) {
1099                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1100                 return ret;
1101         }
1102
1103         /* Initialize the shared code (base driver) */
1104         ret = i40e_init_shared_code(hw);
1105         if (ret) {
1106                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1107                 return ret;
1108         }
1109
1110         /*
1111          * To work around the NVM issue, initialize registers
1112          * for flexible payload and packet type of QinQ by
1113          * software. This should be removed once the issue is fixed
1114          * in the NVM.
1115          */
1116         i40e_GLQF_reg_init(hw);
1117
1118         /* Initialize the input set for filters (hash and fd) to default value */
1119         i40e_filter_input_set_init(pf);
1120
1121         /* Initialize the parameters for adminq */
1122         i40e_init_adminq_parameter(hw);
1123         ret = i40e_init_adminq(hw);
1124         if (ret != I40E_SUCCESS) {
1125                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1126                 return -EIO;
1127         }
1128         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1129                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1130                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
1131                      ((hw->nvm.version >> 12) & 0xf),
1132                      ((hw->nvm.version >> 4) & 0xff),
1133                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
1134
1135         /* initialise the L3_MAP register */
1136         ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
1137                                    0x00000028,  NULL);
1138         if (ret)
1139                 PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);
1140
1141         /* Need the special FW version to support floating VEB */
1142         config_floating_veb(dev);
1143         /* Clear PXE mode */
1144         i40e_clear_pxe_mode(hw);
1145         i40e_dev_sync_phy_type(hw);
1146
1147         /*
1148          * On X710, performance numbers fall far short of expectations on
1149          * recent firmware versions, and the fix may not land in the next
1150          * firmware release, so a workaround in the software driver is
1151          * needed. It modifies the initial values of three internal-only
1152          * registers. The workaround can be removed once the issue is
1153          * fixed in firmware.
1154          */
1155         i40e_configure_registers(hw);
1156
1157         /* Get hw capabilities */
1158         ret = i40e_get_cap(hw);
1159         if (ret != I40E_SUCCESS) {
1160                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1161                 goto err_get_capabilities;
1162         }
1163
1164         /* Initialize parameters for PF */
1165         ret = i40e_pf_parameter_init(dev);
1166         if (ret != 0) {
1167                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1168                 goto err_parameter_init;
1169         }
1170
1171         /* Initialize the queue management */
1172         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1173         if (ret < 0) {
1174                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1175                 goto err_qp_pool_init;
1176         }
1177         ret = i40e_res_pool_init(&pf->msix_pool, 1,
1178                                 hw->func_caps.num_msix_vectors - 1);
1179         if (ret < 0) {
1180                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1181                 goto err_msix_pool_init;
1182         }
1183
1184         /* Initialize lan hmc */
1185         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1186                                 hw->func_caps.num_rx_qp, 0, 0);
1187         if (ret != I40E_SUCCESS) {
1188                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1189                 goto err_init_lan_hmc;
1190         }
1191
1192         /* Configure lan hmc */
1193         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1194         if (ret != I40E_SUCCESS) {
1195                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1196                 goto err_configure_lan_hmc;
1197         }
1198
1199         /* Get and check the mac address */
1200         i40e_get_mac_addr(hw, hw->mac.addr);
1201         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1202                 PMD_INIT_LOG(ERR, "mac address is not valid");
1203                 ret = -EIO;
1204                 goto err_get_mac_addr;
1205         }
1206         /* Copy the permanent MAC address */
1207         ether_addr_copy((struct ether_addr *) hw->mac.addr,
1208                         (struct ether_addr *) hw->mac.perm_addr);
1209
1210         /* Disable flow control */
1211         hw->fc.requested_mode = I40E_FC_NONE;
1212         i40e_set_fc(hw, &aq_fail, TRUE);
1213
1214         /* Set the global registers with default ether type value */
1215         ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
1216         if (ret != I40E_SUCCESS) {
1217                 PMD_INIT_LOG(ERR,
1218                         "Failed to set the default outer VLAN ether type");
1219                 goto err_setup_pf_switch;
1220         }
1221
1222         /* PF setup, which includes VSI setup */
1223         ret = i40e_pf_setup(pf);
1224         if (ret) {
1225                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1226                 goto err_setup_pf_switch;
1227         }
1228
1229         /* reset all stats of the device, including pf and main vsi */
1230         i40e_dev_stats_reset(dev);
1231
1232         vsi = pf->main_vsi;
1233
1234         /* Disable double vlan by default */
1235         i40e_vsi_config_double_vlan(vsi, FALSE);
1236
1237         /* Disable S-TAG identification when floating_veb is disabled */
1238         if (!pf->floating_veb) {
1239                 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1240                 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1241                         ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1242                         I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1243                 }
1244         }
1245
1246         if (!vsi->max_macaddrs)
1247                 len = ETHER_ADDR_LEN;
1248         else
1249                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1250
1251         /* Should be after VSI initialized */
1252         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1253         if (!dev->data->mac_addrs) {
1254                 PMD_INIT_LOG(ERR,
1255                         "Failed to allocate memory for storing MAC address");
1256                 goto err_mac_alloc;
1257         }
1258         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1259                                         &dev->data->mac_addrs[0]);
1260
1261         /* Init dcb to sw mode by default */
1262         ret = i40e_dcb_init_configure(dev, TRUE);
1263         if (ret != I40E_SUCCESS) {
1264                 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1265                 pf->flags &= ~I40E_FLAG_DCB;
1266         }
1267         /* Update HW struct after DCB configuration */
1268         i40e_get_cap(hw);
1269
1270         /* initialize pf host driver to setup SRIOV resource if applicable */
1271         i40e_pf_host_init(dev);
1272
1273         /* register callback func to eal lib */
1274         rte_intr_callback_register(intr_handle,
1275                                    i40e_dev_interrupt_handler, dev);
1276
1277         /* configure and enable device interrupt */
1278         i40e_pf_config_irq0(hw, TRUE);
1279         i40e_pf_enable_irq0(hw);
1280
1281         /* enable uio intr after callback register */
1282         rte_intr_enable(intr_handle);
1283         /*
1284          * Add an ethertype filter to drop all flow control frames transmitted
1285          * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1286          * frames to wire.
1287          */
1288         i40e_add_tx_flow_control_drop_filter(pf);
1289
1290         /* Set the max frame size to 0x2600 by default,
1291          * in case other drivers changed the default value.
1292          */
1293         i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1294
1295         /* initialize mirror rule list */
1296         TAILQ_INIT(&pf->mirror_list);
1297
1298         ret = i40e_init_ethtype_filter_list(dev);
1299         if (ret < 0)
1300                 goto err_init_ethtype_filter_list;
1301         ret = i40e_init_tunnel_filter_list(dev);
1302         if (ret < 0)
1303                 goto err_init_tunnel_filter_list;
1304         ret = i40e_init_fdir_filter_list(dev);
1305         if (ret < 0)
1306                 goto err_init_fdir_filter_list;
1307
1308         return 0;
1309
1310 err_init_fdir_filter_list:
1311         rte_free(pf->tunnel.hash_table);
1312         rte_free(pf->tunnel.hash_map);
1313 err_init_tunnel_filter_list:
1314         rte_free(pf->ethertype.hash_table);
1315         rte_free(pf->ethertype.hash_map);
1316 err_init_ethtype_filter_list:
1317         rte_free(dev->data->mac_addrs);
1318 err_mac_alloc:
1319         i40e_vsi_release(pf->main_vsi);
1320 err_setup_pf_switch:
1321 err_get_mac_addr:
1322 err_configure_lan_hmc:
1323         (void)i40e_shutdown_lan_hmc(hw);
1324 err_init_lan_hmc:
1325         i40e_res_pool_destroy(&pf->msix_pool);
1326 err_msix_pool_init:
1327         i40e_res_pool_destroy(&pf->qp_pool);
1328 err_qp_pool_init:
1329 err_parameter_init:
1330 err_get_capabilities:
1331         (void)i40e_shutdown_adminq(hw);
1332
1333         return ret;
1334 }
1335
1336 static void
1337 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1338 {
1339         struct i40e_ethertype_filter *p_ethertype;
1340         struct i40e_ethertype_rule *ethertype_rule;
1341
1342         ethertype_rule = &pf->ethertype;
1343         /* Remove all ethertype filter rules and hash */
1344         if (ethertype_rule->hash_map)
1345                 rte_free(ethertype_rule->hash_map);
1346         if (ethertype_rule->hash_table)
1347                 rte_hash_free(ethertype_rule->hash_table);
1348
1349         while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1350                 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1351                              p_ethertype, rules);
1352                 rte_free(p_ethertype);
1353         }
1354 }
1355
1356 static void
1357 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1358 {
1359         struct i40e_tunnel_filter *p_tunnel;
1360         struct i40e_tunnel_rule *tunnel_rule;
1361
1362         tunnel_rule = &pf->tunnel;
1363         /* Remove all tunnel filter rules and hash */
1364         if (tunnel_rule->hash_map)
1365                 rte_free(tunnel_rule->hash_map);
1366         if (tunnel_rule->hash_table)
1367                 rte_hash_free(tunnel_rule->hash_table);
1368
1369         while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1370                 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1371                 rte_free(p_tunnel);
1372         }
1373 }
1374
1375 static void
1376 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1377 {
1378         struct i40e_fdir_filter *p_fdir;
1379         struct i40e_fdir_info *fdir_info;
1380
1381         fdir_info = &pf->fdir;
1382         /* Remove all flow director rules and hash */
1383         if (fdir_info->hash_map)
1384                 rte_free(fdir_info->hash_map);
1385         if (fdir_info->hash_table)
1386                 rte_hash_free(fdir_info->hash_table);
1387
1388         while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1389                 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1390                 rte_free(p_fdir);
1391         }
1392 }
1393
1394 static int
1395 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1396 {
1397         struct i40e_pf *pf;
1398         struct rte_pci_device *pci_dev;
1399         struct rte_intr_handle *intr_handle;
1400         struct i40e_hw *hw;
1401         struct i40e_filter_control_settings settings;
1402         struct rte_flow *p_flow;
1403         int ret;
1404         uint8_t aq_fail = 0;
1405
1406         PMD_INIT_FUNC_TRACE();
1407
1408         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1409                 return 0;
1410
1411         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1412         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1413         pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1414         intr_handle = &pci_dev->intr_handle;
1415
1416         if (hw->adapter_stopped == 0)
1417                 i40e_dev_close(dev);
1418
1419         dev->dev_ops = NULL;
1420         dev->rx_pkt_burst = NULL;
1421         dev->tx_pkt_burst = NULL;
1422
1423         /* Clear PXE mode */
1424         i40e_clear_pxe_mode(hw);
1425
1426         /* Unconfigure filter control */
1427         memset(&settings, 0, sizeof(settings));
1428         ret = i40e_set_filter_control(hw, &settings);
1429         if (ret)
1430                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1431                                         ret);
1432
1433         /* Disable flow control */
1434         hw->fc.requested_mode = I40E_FC_NONE;
1435         i40e_set_fc(hw, &aq_fail, TRUE);
1436
1437         /* uninitialize pf host driver */
1438         i40e_pf_host_uninit(dev);
1439
1440         rte_free(dev->data->mac_addrs);
1441         dev->data->mac_addrs = NULL;
1442
1443         /* disable uio intr before callback unregister */
1444         rte_intr_disable(intr_handle);
1445
1446         /* unregister callback func from eal lib */
1447         rte_intr_callback_unregister(intr_handle,
1448                                      i40e_dev_interrupt_handler, dev);
1449
1450         i40e_rm_ethtype_filter_list(pf);
1451         i40e_rm_tunnel_filter_list(pf);
1452         i40e_rm_fdir_filter_list(pf);
1453
1454         /* Remove all flows */
1455         while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1456                 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1457                 rte_free(p_flow);
1458         }
1459
1460         return 0;
1461 }
1462
1463 static int
1464 i40e_dev_configure(struct rte_eth_dev *dev)
1465 {
1466         struct i40e_adapter *ad =
1467                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1468         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1469         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1470         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1471         int i, ret;
1472
1473         ret = i40e_dev_sync_phy_type(hw);
1474         if (ret)
1475                 return ret;
1476
1477         /* Initialize to TRUE. If any Rx queue fails to meet the bulk
1478          * allocation or vector Rx preconditions, we will reset it.
1479          */
1480         ad->rx_bulk_alloc_allowed = true;
1481         ad->rx_vec_allowed = true;
1482         ad->tx_simple_allowed = true;
1483         ad->tx_vec_allowed = true;
1484
1485         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1486                 ret = i40e_fdir_setup(pf);
1487                 if (ret != I40E_SUCCESS) {
1488                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1489                         return -ENOTSUP;
1490                 }
1491                 ret = i40e_fdir_configure(dev);
1492                 if (ret < 0) {
1493                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1494                         goto err;
1495                 }
1496         } else
1497                 i40e_fdir_teardown(pf);
1498
1499         ret = i40e_dev_init_vlan(dev);
1500         if (ret < 0)
1501                 goto err;
1502
1503         /* VMDQ setup.
1504          *  VMDQ setting needs to be moved out of i40e_pf_config_mq_rx()
1505          *  because VMDQ and RSS settings have different requirements.
1506          *  The general PMD call sequence is NIC init, configure,
1507          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
1508          *  the VSI a specific queue belongs to when VMDQ is applicable, so
1509          *  the VMDQ setting has to be done before rx/tx_queue_setup(); this
1510          *  function is a good place for vmdq_setup.
1511          *  The RSS setting needs the actual number of configured RX queues,
1512          *  which is only available after rx_queue_setup(), so dev_start()
1513          *  is a good place for the RSS setup.
1514          */
1515         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1516                 ret = i40e_vmdq_setup(dev);
1517                 if (ret)
1518                         goto err;
1519         }
1520
1521         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1522                 ret = i40e_dcb_setup(dev);
1523                 if (ret) {
1524                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1525                         goto err_dcb;
1526                 }
1527         }
1528
1529         TAILQ_INIT(&pf->flow_list);
1530
1531         return 0;
1532
1533 err_dcb:
1534         /* need to release vmdq resources if they exist */
1535         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1536                 i40e_vsi_release(pf->vmdq[i].vsi);
1537                 pf->vmdq[i].vsi = NULL;
1538         }
1539         rte_free(pf->vmdq);
1540         pf->vmdq = NULL;
1541 err:
1542         /* need to release fdir resources if they exist */
1543         i40e_fdir_teardown(pf);
1544         return ret;
1545 }
1546
1547 void
1548 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1549 {
1550         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1551         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1552         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1553         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1554         uint16_t msix_vect = vsi->msix_intr;
1555         uint16_t i;
1556
1557         for (i = 0; i < vsi->nb_qps; i++) {
1558                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1559                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1560                 rte_wmb();
1561         }
1562
1563         if (vsi->type != I40E_VSI_SRIOV) {
1564                 if (!rte_intr_allow_others(intr_handle)) {
1565                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1566                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1567                         I40E_WRITE_REG(hw,
1568                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1569                                        0);
1570                 } else {
1571                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1572                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1573                         I40E_WRITE_REG(hw,
1574                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1575                                                        msix_vect - 1), 0);
1576                 }
1577         } else {
1578                 uint32_t reg;
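                /*
                 * Each VF owns (num_msix_vectors_vf - 1) queue vectors, with
                 * vector 0 kept for its misc interrupt; vsi->user_param is
                 * the VF index, so this picks the VF's VPINT_LNKLSTN entry.
                 */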
1579                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1580                         vsi->user_param + (msix_vect - 1);
1581
1582                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1583                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1584         }
1585         I40E_WRITE_FLUSH(hw);
1586 }
1587
1588 static void
1589 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1590                        int base_queue, int nb_queue)
1591 {
1592         int i;
1593         uint32_t val;
1594         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1595
1596         /* Bind all RX queues to allocated MSIX interrupt */
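        /* The RQCTL entries chain the queues through NEXTQ_INDX so that a
         * single MSI-X vector covers the whole range; the last queue
         * terminates the linked list (NEXTQ_INDX_MASK).
         */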
1597         for (i = 0; i < nb_queue; i++) {
1598                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1599                         I40E_QINT_RQCTL_ITR_INDX_MASK |
1600                         ((base_queue + i + 1) <<
1601                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1602                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1603                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1604
1605                 if (i == nb_queue - 1)
1606                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1607                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1608         }
1609
1610         /* Write first RX queue to Link list register as the head element */
1611         if (vsi->type != I40E_VSI_SRIOV) {
1612                 uint16_t interval =
1613                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1614
1615                 if (msix_vect == I40E_MISC_VEC_ID) {
1616                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1617                                        (base_queue <<
1618                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1619                                        (0x0 <<
1620                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1621                         I40E_WRITE_REG(hw,
1622                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1623                                        interval);
1624                 } else {
1625                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1626                                        (base_queue <<
1627                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1628                                        (0x0 <<
1629                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1630                         I40E_WRITE_REG(hw,
1631                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1632                                                        msix_vect - 1),
1633                                        interval);
1634                 }
1635         } else {
1636                 uint32_t reg;
1637
1638                 if (msix_vect == I40E_MISC_VEC_ID) {
1639                         I40E_WRITE_REG(hw,
1640                                        I40E_VPINT_LNKLST0(vsi->user_param),
1641                                        (base_queue <<
1642                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1643                                        (0x0 <<
1644                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1645                 } else {
1646                         /* num_msix_vectors_vf needs to exclude irq0 */
1647                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1648                                 vsi->user_param + (msix_vect - 1);
1649
1650                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1651                                        (base_queue <<
1652                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1653                                        (0x0 <<
1654                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1655                 }
1656         }
1657
1658         I40E_WRITE_FLUSH(hw);
1659 }
1660
1661 void
1662 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
1663 {
1664         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1665         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1666         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1667         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1668         uint16_t msix_vect = vsi->msix_intr;
1669         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1670         uint16_t queue_idx = 0;
1671         int record = 0;
1672         uint32_t val;
1673         int i;
1674
1675         for (i = 0; i < vsi->nb_qps; i++) {
1676                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1677                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1678         }
1679
1680         /* Disable auto-masking so the INTENA flag is not auto-cleared on interrupt */
1681         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1682         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1683                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1684                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1685         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1686
1687         /* VF bind interrupt */
1688         if (vsi->type == I40E_VSI_SRIOV) {
1689                 __vsi_queues_bind_intr(vsi, msix_vect,
1690                                        vsi->base_queue, vsi->nb_qps);
1691                 return;
1692         }
1693
1694         /* PF & VMDq bind interrupt */
1695         if (rte_intr_dp_is_en(intr_handle)) {
1696                 if (vsi->type == I40E_VSI_MAIN) {
1697                         queue_idx = 0;
1698                         record = 1;
1699                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1700                         struct i40e_vsi *main_vsi =
1701                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1702                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1703                         record = 1;
1704                 }
1705         }
1706
1707         for (i = 0; i < vsi->nb_used_qps; i++) {
1708                 if (nb_msix <= 1) {
1709                         if (!rte_intr_allow_others(intr_handle))
1710                                 /* allow to share MISC_VEC_ID */
1711                                 msix_vect = I40E_MISC_VEC_ID;
1712
1713                         /* not enough msix_vect, map all to one */
1714                         __vsi_queues_bind_intr(vsi, msix_vect,
1715                                                vsi->base_queue + i,
1716                                                vsi->nb_used_qps - i);
1717                         for (; !!record && i < vsi->nb_used_qps; i++)
1718                                 intr_handle->intr_vec[queue_idx + i] =
1719                                         msix_vect;
1720                         break;
1721                 }
1722                 /* 1:1 queue/msix_vect mapping */
1723                 __vsi_queues_bind_intr(vsi, msix_vect,
1724                                        vsi->base_queue + i, 1);
1725                 if (!!record)
1726                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1727
1728                 msix_vect++;
1729                 nb_msix--;
1730         }
1731 }
1732
1733 static void
1734 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1735 {
1736         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1737         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1738         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1739         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1740         uint16_t interval = i40e_calc_itr_interval(\
1741                 RTE_LIBRTE_I40E_ITR_INTERVAL);
1742         uint16_t msix_intr, i;
1743
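        /*
         * The ITR (interrupt throttling) interval written below is in the
         * hardware's 2-usec units; i40e_calc_itr_interval() is assumed to
         * perform that conversion from RTE_LIBRTE_I40E_ITR_INTERVAL.
         */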
1744         if (rte_intr_allow_others(intr_handle))
1745                 for (i = 0; i < vsi->nb_msix; i++) {
1746                         msix_intr = vsi->msix_intr + i;
1747                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1748                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1749                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1750                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1751                                 (interval <<
1752                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1753                 }
1754         else
1755                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1756                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1757                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1758                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1759                                (interval <<
1760                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1761
1762         I40E_WRITE_FLUSH(hw);
1763 }
1764
1765 static void
1766 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1767 {
1768         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1769         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1770         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1771         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1772         uint16_t msix_intr, i;
1773
1774         if (rte_intr_allow_others(intr_handle))
1775                 for (i = 0; i < vsi->nb_msix; i++) {
1776                         msix_intr = vsi->msix_intr + i;
1777                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1778                                        0);
1779                 }
1780         else
1781                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1782
1783         I40E_WRITE_FLUSH(hw);
1784 }
1785
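/* Translate the ETH_LINK_SPEED_* bitmap requested through the ethdev API
 * into the I40E_LINK_SPEED_* bitmap used by the admin queue.
 */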
1786 static inline uint8_t
1787 i40e_parse_link_speeds(uint16_t link_speeds)
1788 {
1789         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1790
1791         if (link_speeds & ETH_LINK_SPEED_40G)
1792                 link_speed |= I40E_LINK_SPEED_40GB;
1793         if (link_speeds & ETH_LINK_SPEED_25G)
1794                 link_speed |= I40E_LINK_SPEED_25GB;
1795         if (link_speeds & ETH_LINK_SPEED_20G)
1796                 link_speed |= I40E_LINK_SPEED_20GB;
1797         if (link_speeds & ETH_LINK_SPEED_10G)
1798                 link_speed |= I40E_LINK_SPEED_10GB;
1799         if (link_speeds & ETH_LINK_SPEED_1G)
1800                 link_speed |= I40E_LINK_SPEED_1GB;
1801         if (link_speeds & ETH_LINK_SPEED_100M)
1802                 link_speed |= I40E_LINK_SPEED_100MB;
1803
1804         return link_speed;
1805 }
1806
1807 static int
1808 i40e_phy_conf_link(struct i40e_hw *hw,
1809                    uint8_t abilities,
1810                    uint8_t force_speed)
1811 {
1812         enum i40e_status_code status;
1813         struct i40e_aq_get_phy_abilities_resp phy_ab;
1814         struct i40e_aq_set_phy_config phy_conf;
1815         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1816                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1817                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1818                         I40E_AQ_PHY_FLAG_LOW_POWER;
1819         const uint8_t advt = I40E_LINK_SPEED_40GB |
1820                         I40E_LINK_SPEED_25GB |
1821                         I40E_LINK_SPEED_10GB |
1822                         I40E_LINK_SPEED_1GB |
1823                         I40E_LINK_SPEED_100MB;
1824         int ret = -ENOTSUP;
1825
1826
1827         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1828                                               NULL);
1829         if (status)
1830                 return ret;
1831
1832         memset(&phy_conf, 0, sizeof(phy_conf));
1833
1834         /* bits 0-2 use the values from get_phy_abilities_resp */
1835         abilities &= ~mask;
1836         abilities |= phy_ab.abilities & mask;
1837
1838         /* update abilities and speed */
1839         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1840                 phy_conf.link_speed = advt;
1841         else
1842                 phy_conf.link_speed = force_speed;
1843
1844         phy_conf.abilities = abilities;
1845
1846         /* use get_phy_abilities_resp value for the rest */
1847         phy_conf.phy_type = phy_ab.phy_type;
1848         phy_conf.phy_type_ext = phy_ab.phy_type_ext;
1849         phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
1850         phy_conf.eee_capability = phy_ab.eee_capability;
1851         phy_conf.eeer = phy_ab.eeer_val;
1852         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1853
1854         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1855                     phy_ab.abilities, phy_ab.link_speed);
1856         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1857                     phy_conf.abilities, phy_conf.link_speed);
1858
1859         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1860         if (status)
1861                 return ret;
1862
1863         return I40E_SUCCESS;
1864 }
1865
1866 static int
1867 i40e_apply_link_speed(struct rte_eth_dev *dev)
1868 {
1869         uint8_t speed;
1870         uint8_t abilities = 0;
1871         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1872         struct rte_eth_conf *conf = &dev->data->dev_conf;
1873
1874         speed = i40e_parse_link_speeds(conf->link_speeds);
1875         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1876         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1877                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1878         abilities |= I40E_AQ_PHY_LINK_ENABLED;
1879
1880         /* Skip changing speed on 40G interfaces, FW does not support */
1881         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
1882                 speed =  I40E_LINK_SPEED_UNKNOWN;
1883                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1884         }
1885
1886         return i40e_phy_conf_link(hw, abilities, speed);
1887 }
1888
1889 static int
1890 i40e_dev_start(struct rte_eth_dev *dev)
1891 {
1892         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1893         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1894         struct i40e_vsi *main_vsi = pf->main_vsi;
1895         int ret, i;
1896         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1897         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1898         uint32_t intr_vector = 0;
1899         struct i40e_vsi *vsi;
1900
1901         hw->adapter_stopped = 0;
1902
1903         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1904                 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
1905                              dev->data->port_id);
1906                 return -EINVAL;
1907         }
1908
1909         rte_intr_disable(intr_handle);
1910
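        /* When Rx queue interrupts are requested, reserve one event fd per Rx
         * queue, provided multiple MSI-X vectors are supported or SRIOV is
         * not active.
         */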
1911         if ((rte_intr_cap_multiple(intr_handle) ||
1912              !RTE_ETH_DEV_SRIOV(dev).active) &&
1913             dev->data->dev_conf.intr_conf.rxq != 0) {
1914                 intr_vector = dev->data->nb_rx_queues;
1915                 ret = rte_intr_efd_enable(intr_handle, intr_vector);
1916                 if (ret)
1917                         return ret;
1918         }
1919
1920         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1921                 intr_handle->intr_vec =
1922                         rte_zmalloc("intr_vec",
1923                                     dev->data->nb_rx_queues * sizeof(int),
1924                                     0);
1925                 if (!intr_handle->intr_vec) {
1926                         PMD_INIT_LOG(ERR,
1927                                 "Failed to allocate %d rx_queues intr_vec",
1928                                 dev->data->nb_rx_queues);
1929                         return -ENOMEM;
1930                 }
1931         }
1932
1933         /* Initialize VSI */
1934         ret = i40e_dev_rxtx_init(pf);
1935         if (ret != I40E_SUCCESS) {
1936                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1937                 goto err_up;
1938         }
1939
1940         /* Map queues with MSIX interrupt */
1941         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1942                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1943         i40e_vsi_queues_bind_intr(main_vsi);
1944         i40e_vsi_enable_queues_intr(main_vsi);
1945
1946         /* Map VMDQ VSI queues with MSIX interrupt */
1947         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1948                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1949                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
1950                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1951         }
1952
1953         /* enable FDIR MSIX interrupt */
1954         if (pf->fdir.fdir_vsi) {
1955                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
1956                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1957         }
1958
1959         /* Enable all queues which have been configured */
1960         ret = i40e_dev_switch_queues(pf, TRUE);
1961         if (ret != I40E_SUCCESS) {
1962                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
1963                 goto err_up;
1964         }
1965
1966         /* Enable receiving broadcast packets */
1967         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
1968         if (ret != I40E_SUCCESS)
1969                 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1970
1971         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1972                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
1973                                                 true, NULL);
1974                 if (ret != I40E_SUCCESS)
1975                         PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1976         }
1977
1978         /* Enable the VLAN promiscuous mode. */
1979         if (pf->vfs) {
1980                 for (i = 0; i < pf->vf_num; i++) {
1981                         vsi = pf->vfs[i].vsi;
1982                         i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
1983                                                      true, NULL);
1984                 }
1985         }
1986
1987         /* Apply link configure */
1988         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
1989                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1990                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
1991                                 ETH_LINK_SPEED_40G)) {
1992                 PMD_DRV_LOG(ERR, "Invalid link setting");
1993                 goto err_up;
1994         }
1995         ret = i40e_apply_link_speed(dev);
1996         if (I40E_SUCCESS != ret) {
1997                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
1998                 goto err_up;
1999         }
2000
2001         if (!rte_intr_allow_others(intr_handle)) {
2002                 rte_intr_callback_unregister(intr_handle,
2003                                              i40e_dev_interrupt_handler,
2004                                              (void *)dev);
2005                 /* configure and enable device interrupt */
2006                 i40e_pf_config_irq0(hw, FALSE);
2007                 i40e_pf_enable_irq0(hw);
2008
2009                 if (dev->data->dev_conf.intr_conf.lsc != 0)
2010                         PMD_INIT_LOG(INFO,
2011                                 "lsc won't be enabled because of no intr multiplex");
2012         } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
2013                 ret = i40e_aq_set_phy_int_mask(hw,
2014                                                ~(I40E_AQ_EVENT_LINK_UPDOWN |
2015                                                I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2016                                                I40E_AQ_EVENT_MEDIA_NA), NULL);
2017                 if (ret != I40E_SUCCESS)
2018                         PMD_DRV_LOG(WARNING, "Failed to set phy mask");
2019
2020                 /* Call get_link_info aq command to enable LSE */
2021                 i40e_dev_link_update(dev, 0);
2022         }
2023
2024         /* enable uio intr after callback register */
2025         rte_intr_enable(intr_handle);
2026
2027         i40e_filter_restore(pf);
2028
2029         return I40E_SUCCESS;
2030
2031 err_up:
2032         i40e_dev_switch_queues(pf, FALSE);
2033         i40e_dev_clear_queues(dev);
2034
2035         return ret;
2036 }
2037
2038 static void
2039 i40e_dev_stop(struct rte_eth_dev *dev)
2040 {
2041         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2042         struct i40e_vsi *main_vsi = pf->main_vsi;
2043         struct i40e_mirror_rule *p_mirror;
2044         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2045         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2046         int i;
2047
2048         /* Disable all queues */
2049         i40e_dev_switch_queues(pf, FALSE);
2050
2051         /* un-map queues with interrupt registers */
2052         i40e_vsi_disable_queues_intr(main_vsi);
2053         i40e_vsi_queues_unbind_intr(main_vsi);
2054
2055         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2056                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2057                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2058         }
2059
2060         if (pf->fdir.fdir_vsi) {
2061                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2062                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2063         }
2064         /* Clear all queues and release memory */
2065         i40e_dev_clear_queues(dev);
2066
2067         /* Set link down */
2068         i40e_dev_set_link_down(dev);
2069
2070         /* Remove all mirror rules */
2071         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2072                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2073                 rte_free(p_mirror);
2074         }
2075         pf->nb_mirror_rule = 0;
2076
2077         if (!rte_intr_allow_others(intr_handle))
2078                 /* revert to the default interrupt handler */
2079                 rte_intr_callback_register(intr_handle,
2080                                            i40e_dev_interrupt_handler,
2081                                            (void *)dev);
2082
2083         /* Clean datapath event and queue/vec mapping */
2084         rte_intr_efd_disable(intr_handle);
2085         if (intr_handle->intr_vec) {
2086                 rte_free(intr_handle->intr_vec);
2087                 intr_handle->intr_vec = NULL;
2088         }
2089 }
2090
2091 static void
2092 i40e_dev_close(struct rte_eth_dev *dev)
2093 {
2094         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2095         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2096         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2097         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2098         uint32_t reg;
2099         int i;
2100
2101         PMD_INIT_FUNC_TRACE();
2102
2103         i40e_dev_stop(dev);
2104         hw->adapter_stopped = 1;
2105         i40e_dev_free_queues(dev);
2106
2107         /* Disable interrupt */
2108         i40e_pf_disable_irq0(hw);
2109         rte_intr_disable(intr_handle);
2110
2111         /* shutdown and destroy the HMC */
2112         i40e_shutdown_lan_hmc(hw);
2113
2114         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2115                 i40e_vsi_release(pf->vmdq[i].vsi);
2116                 pf->vmdq[i].vsi = NULL;
2117         }
2118         rte_free(pf->vmdq);
2119         pf->vmdq = NULL;
2120
2121         /* release all the existing VSIs and VEBs */
2122         i40e_fdir_teardown(pf);
2123         i40e_vsi_release(pf->main_vsi);
2124
2125         /* shutdown the adminq */
2126         i40e_aq_queue_shutdown(hw, true);
2127         i40e_shutdown_adminq(hw);
2128
2129         i40e_res_pool_destroy(&pf->qp_pool);
2130         i40e_res_pool_destroy(&pf->msix_pool);
2131
2132         /* force a PF reset to clean anything leftover */
2133         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2134         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2135                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2136         I40E_WRITE_FLUSH(hw);
2137 }
2138
2139 static void
2140 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2141 {
2142         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2143         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2144         struct i40e_vsi *vsi = pf->main_vsi;
2145         int status;
2146
2147         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2148                                                      true, NULL, true);
2149         if (status != I40E_SUCCESS)
2150                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2151
2152         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2153                                                         TRUE, NULL);
2154         if (status != I40E_SUCCESS)
2155                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2156
2157 }
2158
2159 static void
2160 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2161 {
2162         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2163         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2164         struct i40e_vsi *vsi = pf->main_vsi;
2165         int status;
2166
2167         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2168                                                      false, NULL, true);
2169         if (status != I40E_SUCCESS)
2170                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2171
2172         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2173                                                         false, NULL);
2174         if (status != I40E_SUCCESS)
2175                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2176 }
2177
2178 static void
2179 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2180 {
2181         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2182         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2183         struct i40e_vsi *vsi = pf->main_vsi;
2184         int ret;
2185
2186         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2187         if (ret != I40E_SUCCESS)
2188                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2189 }
2190
2191 static void
2192 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2193 {
2194         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2195         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2196         struct i40e_vsi *vsi = pf->main_vsi;
2197         int ret;
2198
2199         if (dev->data->promiscuous == 1)
2200                 return; /* must remain in all_multicast mode */
2201
2202         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2203                                 vsi->seid, FALSE, NULL);
2204         if (ret != I40E_SUCCESS)
2205                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2206 }
2207
2208 /*
2209  * Set device link up.
2210  */
2211 static int
2212 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2213 {
2214         /* re-apply link speed setting */
2215         return i40e_apply_link_speed(dev);
2216 }
2217
2218 /*
2219  * Set device link down.
2220  */
2221 static int
2222 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2223 {
2224         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2225         uint8_t abilities = 0;
2226         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2227
2228         abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2229         return i40e_phy_conf_link(hw, abilities, speed);
2230 }
2231
2232 int
2233 i40e_dev_link_update(struct rte_eth_dev *dev,
2234                      int wait_to_complete)
2235 {
2236 #define CHECK_INTERVAL 100  /* 100ms */
2237 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
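        /*
         * When wait_to_complete is set, poll the admin queue for link status
         * every CHECK_INTERVAL ms, for at most MAX_REPEAT_TIME attempts.
         */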
2238         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2239         struct i40e_link_status link_status;
2240         struct rte_eth_link link, old;
2241         int status;
2242         unsigned rep_cnt = MAX_REPEAT_TIME;
2243         bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2244
2245         memset(&link, 0, sizeof(link));
2246         memset(&old, 0, sizeof(old));
2247         memset(&link_status, 0, sizeof(link_status));
2248         rte_i40e_dev_atomic_read_link_status(dev, &old);
2249
2250         do {
2251                 /* Get link status information from hardware */
2252                 status = i40e_aq_get_link_info(hw, enable_lse,
2253                                                 &link_status, NULL);
2254                 if (status != I40E_SUCCESS) {
2255                         link.link_speed = ETH_SPEED_NUM_100M;
2256                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2257                         PMD_DRV_LOG(ERR, "Failed to get link info");
2258                         goto out;
2259                 }
2260
2261                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2262                 if (!wait_to_complete || link.link_status)
2263                         break;
2264
2265                 rte_delay_ms(CHECK_INTERVAL);
2266         } while (--rep_cnt);
2267
2268         if (!link.link_status)
2269                 goto out;
2270
2271         /* i40e uses full duplex only */
2272         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2273
2274         /* Parse the link status */
2275         switch (link_status.link_speed) {
2276         case I40E_LINK_SPEED_100MB:
2277                 link.link_speed = ETH_SPEED_NUM_100M;
2278                 break;
2279         case I40E_LINK_SPEED_1GB:
2280                 link.link_speed = ETH_SPEED_NUM_1G;
2281                 break;
2282         case I40E_LINK_SPEED_10GB:
2283                 link.link_speed = ETH_SPEED_NUM_10G;
2284                 break;
2285         case I40E_LINK_SPEED_20GB:
2286                 link.link_speed = ETH_SPEED_NUM_20G;
2287                 break;
2288         case I40E_LINK_SPEED_25GB:
2289                 link.link_speed = ETH_SPEED_NUM_25G;
2290                 break;
2291         case I40E_LINK_SPEED_40GB:
2292                 link.link_speed = ETH_SPEED_NUM_40G;
2293                 break;
2294         default:
2295                 link.link_speed = ETH_SPEED_NUM_100M;
2296                 break;
2297         }
2298
2299         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2300                         ETH_LINK_SPEED_FIXED);
2301
2302 out:
2303         rte_i40e_dev_atomic_write_link_status(dev, &link);
2304         if (link.link_status == old.link_status)
2305                 return -1;
2306
2307         i40e_notify_all_vfs_link_status(dev);
2308
2309         return 0;
2310 }
2311
2312 /* Get all the statistics of a VSI */
2313 void
2314 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2315 {
2316         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2317         struct i40e_eth_stats *nes = &vsi->eth_stats;
2318         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2319         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2320
2321         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2322                             vsi->offset_loaded, &oes->rx_bytes,
2323                             &nes->rx_bytes);
2324         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2325                             vsi->offset_loaded, &oes->rx_unicast,
2326                             &nes->rx_unicast);
2327         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2328                             vsi->offset_loaded, &oes->rx_multicast,
2329                             &nes->rx_multicast);
2330         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2331                             vsi->offset_loaded, &oes->rx_broadcast,
2332                             &nes->rx_broadcast);
2333         /* exclude CRC bytes */
2334         nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2335                 nes->rx_broadcast) * ETHER_CRC_LEN;
2336
2337         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2338                             &oes->rx_discards, &nes->rx_discards);
2339         /* GLV_REPC not supported */
2340         /* GLV_RMPC not supported */
2341         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2342                             &oes->rx_unknown_protocol,
2343                             &nes->rx_unknown_protocol);
2344         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2345                             vsi->offset_loaded, &oes->tx_bytes,
2346                             &nes->tx_bytes);
2347         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2348                             vsi->offset_loaded, &oes->tx_unicast,
2349                             &nes->tx_unicast);
2350         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2351                             vsi->offset_loaded, &oes->tx_multicast,
2352                             &nes->tx_multicast);
2353         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2354                             vsi->offset_loaded,  &oes->tx_broadcast,
2355                             &nes->tx_broadcast);
2356         /* exclude CRC bytes */
2357         nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
2358                 nes->tx_broadcast) * ETHER_CRC_LEN;
2359         /* GLV_TDPC not supported */
2360         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2361                             &oes->tx_errors, &nes->tx_errors);
2362         vsi->offset_loaded = true;
2363
2364         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2365                     vsi->vsi_id);
2366         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2367         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2368         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2369         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2370         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2371         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2372                     nes->rx_unknown_protocol);
2373         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2374         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2375         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2376         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2377         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2378         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2379         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2380                     vsi->vsi_id);
2381 }
2382
2383 static void
2384 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2385 {
2386         unsigned int i;
2387         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2388         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2389
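        /* The 48-bit hardware counters are split across a high and a low
         * 32-bit register; i40e_stat_update_48() reads both halves and
         * accumulates the delta against the stored offset.
         */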
2390         /* Get rx/tx bytes of internal transfer packets */
2391         i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2392                         I40E_GLV_GORCL(hw->port),
2393                         pf->offset_loaded,
2394                         &pf->internal_rx_bytes_offset,
2395                         &pf->internal_rx_bytes);
2396
2397         i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2398                         I40E_GLV_GOTCL(hw->port),
2399                         pf->offset_loaded,
2400                         &pf->internal_tx_bytes_offset,
2401                         &pf->internal_tx_bytes);
2402
2403         /* Get statistics of struct i40e_eth_stats */
2404         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2405                             I40E_GLPRT_GORCL(hw->port),
2406                             pf->offset_loaded, &os->eth.rx_bytes,
2407                             &ns->eth.rx_bytes);
2408         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2409                             I40E_GLPRT_UPRCL(hw->port),
2410                             pf->offset_loaded, &os->eth.rx_unicast,
2411                             &ns->eth.rx_unicast);
2412         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2413                             I40E_GLPRT_MPRCL(hw->port),
2414                             pf->offset_loaded, &os->eth.rx_multicast,
2415                             &ns->eth.rx_multicast);
2416         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2417                             I40E_GLPRT_BPRCL(hw->port),
2418                             pf->offset_loaded, &os->eth.rx_broadcast,
2419                             &ns->eth.rx_broadcast);
2420         /* Workaround: exclude CRC bytes (ETHER_CRC_LEN per rx packet) and
2421          * bytes of internal transfer packets from the rx byte statistics.
2422          */
2423         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2424                 ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
2425
2426         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2427                             pf->offset_loaded, &os->eth.rx_discards,
2428                             &ns->eth.rx_discards);
2429         /* GLPRT_REPC not supported */
2430         /* GLPRT_RMPC not supported */
2431         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2432                             pf->offset_loaded,
2433                             &os->eth.rx_unknown_protocol,
2434                             &ns->eth.rx_unknown_protocol);
2435         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2436                             I40E_GLPRT_GOTCL(hw->port),
2437                             pf->offset_loaded, &os->eth.tx_bytes,
2438                             &ns->eth.tx_bytes);
2439         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2440                             I40E_GLPRT_UPTCL(hw->port),
2441                             pf->offset_loaded, &os->eth.tx_unicast,
2442                             &ns->eth.tx_unicast);
2443         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2444                             I40E_GLPRT_MPTCL(hw->port),
2445                             pf->offset_loaded, &os->eth.tx_multicast,
2446                             &ns->eth.tx_multicast);
2447         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2448                             I40E_GLPRT_BPTCL(hw->port),
2449                             pf->offset_loaded, &os->eth.tx_broadcast,
2450                             &ns->eth.tx_broadcast);
2451         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2452                 ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
2453         /* GLPRT_TEPC not supported */
2454
2455         /* additional port specific stats */
2456         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2457                             pf->offset_loaded, &os->tx_dropped_link_down,
2458                             &ns->tx_dropped_link_down);
2459         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2460                             pf->offset_loaded, &os->crc_errors,
2461                             &ns->crc_errors);
2462         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2463                             pf->offset_loaded, &os->illegal_bytes,
2464                             &ns->illegal_bytes);
2465         /* GLPRT_ERRBC not supported */
2466         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2467                             pf->offset_loaded, &os->mac_local_faults,
2468                             &ns->mac_local_faults);
2469         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2470                             pf->offset_loaded, &os->mac_remote_faults,
2471                             &ns->mac_remote_faults);
2472         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2473                             pf->offset_loaded, &os->rx_length_errors,
2474                             &ns->rx_length_errors);
2475         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2476                             pf->offset_loaded, &os->link_xon_rx,
2477                             &ns->link_xon_rx);
2478         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2479                             pf->offset_loaded, &os->link_xoff_rx,
2480                             &ns->link_xoff_rx);
2481         for (i = 0; i < 8; i++) {
2482                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2483                                     pf->offset_loaded,
2484                                     &os->priority_xon_rx[i],
2485                                     &ns->priority_xon_rx[i]);
2486                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2487                                     pf->offset_loaded,
2488                                     &os->priority_xoff_rx[i],
2489                                     &ns->priority_xoff_rx[i]);
2490         }
2491         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2492                             pf->offset_loaded, &os->link_xon_tx,
2493                             &ns->link_xon_tx);
2494         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2495                             pf->offset_loaded, &os->link_xoff_tx,
2496                             &ns->link_xoff_tx);
2497         for (i = 0; i < 8; i++) {
2498                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2499                                     pf->offset_loaded,
2500                                     &os->priority_xon_tx[i],
2501                                     &ns->priority_xon_tx[i]);
2502                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2503                                     pf->offset_loaded,
2504                                     &os->priority_xoff_tx[i],
2505                                     &ns->priority_xoff_tx[i]);
2506                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2507                                     pf->offset_loaded,
2508                                     &os->priority_xon_2_xoff[i],
2509                                     &ns->priority_xon_2_xoff[i]);
2510         }
2511         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2512                             I40E_GLPRT_PRC64L(hw->port),
2513                             pf->offset_loaded, &os->rx_size_64,
2514                             &ns->rx_size_64);
2515         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2516                             I40E_GLPRT_PRC127L(hw->port),
2517                             pf->offset_loaded, &os->rx_size_127,
2518                             &ns->rx_size_127);
2519         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2520                             I40E_GLPRT_PRC255L(hw->port),
2521                             pf->offset_loaded, &os->rx_size_255,
2522                             &ns->rx_size_255);
2523         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2524                             I40E_GLPRT_PRC511L(hw->port),
2525                             pf->offset_loaded, &os->rx_size_511,
2526                             &ns->rx_size_511);
2527         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2528                             I40E_GLPRT_PRC1023L(hw->port),
2529                             pf->offset_loaded, &os->rx_size_1023,
2530                             &ns->rx_size_1023);
2531         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2532                             I40E_GLPRT_PRC1522L(hw->port),
2533                             pf->offset_loaded, &os->rx_size_1522,
2534                             &ns->rx_size_1522);
2535         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2536                             I40E_GLPRT_PRC9522L(hw->port),
2537                             pf->offset_loaded, &os->rx_size_big,
2538                             &ns->rx_size_big);
2539         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2540                             pf->offset_loaded, &os->rx_undersize,
2541                             &ns->rx_undersize);
2542         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2543                             pf->offset_loaded, &os->rx_fragments,
2544                             &ns->rx_fragments);
2545         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2546                             pf->offset_loaded, &os->rx_oversize,
2547                             &ns->rx_oversize);
2548         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2549                             pf->offset_loaded, &os->rx_jabber,
2550                             &ns->rx_jabber);
2551         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2552                             I40E_GLPRT_PTC64L(hw->port),
2553                             pf->offset_loaded, &os->tx_size_64,
2554                             &ns->tx_size_64);
2555         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2556                             I40E_GLPRT_PTC127L(hw->port),
2557                             pf->offset_loaded, &os->tx_size_127,
2558                             &ns->tx_size_127);
2559         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2560                             I40E_GLPRT_PTC255L(hw->port),
2561                             pf->offset_loaded, &os->tx_size_255,
2562                             &ns->tx_size_255);
2563         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2564                             I40E_GLPRT_PTC511L(hw->port),
2565                             pf->offset_loaded, &os->tx_size_511,
2566                             &ns->tx_size_511);
2567         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2568                             I40E_GLPRT_PTC1023L(hw->port),
2569                             pf->offset_loaded, &os->tx_size_1023,
2570                             &ns->tx_size_1023);
2571         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2572                             I40E_GLPRT_PTC1522L(hw->port),
2573                             pf->offset_loaded, &os->tx_size_1522,
2574                             &ns->tx_size_1522);
2575         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2576                             I40E_GLPRT_PTC9522L(hw->port),
2577                             pf->offset_loaded, &os->tx_size_big,
2578                             &ns->tx_size_big);
2579         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2580                            pf->offset_loaded,
2581                            &os->fd_sb_match, &ns->fd_sb_match);
2582         /* GLPRT_MSPDC not supported */
2583         /* GLPRT_XEC not supported */
2584
2585         pf->offset_loaded = true;
2586
2587         if (pf->main_vsi)
2588                 i40e_update_vsi_stats(pf->main_vsi);
2589 }
2590
2591 /* Get all statistics of a port */
2592 static void
2593 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2594 {
2595         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2596         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2597         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2598         unsigned i;
2599
2600         /* read the stats registers to update pf->stats, then fill the rte_eth_stats struct */
2601         i40e_read_stats_registers(pf, hw);
2602
2603         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
2604                         pf->main_vsi->eth_stats.rx_multicast +
2605                         pf->main_vsi->eth_stats.rx_broadcast -
2606                         pf->main_vsi->eth_stats.rx_discards;
2607         stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
2608                         pf->main_vsi->eth_stats.tx_multicast +
2609                         pf->main_vsi->eth_stats.tx_broadcast;
2610         stats->ibytes   = ns->eth.rx_bytes;
2611         stats->obytes   = ns->eth.tx_bytes;
2612         stats->oerrors  = ns->eth.tx_errors +
2613                         pf->main_vsi->eth_stats.tx_errors;
2614
2615         /* Rx Errors */
2616         stats->imissed  = ns->eth.rx_discards +
2617                         pf->main_vsi->eth_stats.rx_discards;
2618         stats->ierrors  = ns->crc_errors +
2619                         ns->rx_length_errors + ns->rx_undersize +
2620                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2621
2622         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2623         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2624         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2625         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2626         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2627         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2628         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2629                     ns->eth.rx_unknown_protocol);
2630         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2631         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2632         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2633         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2634         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2635         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2636
2637         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2638                     ns->tx_dropped_link_down);
2639         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2640         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2641                     ns->illegal_bytes);
2642         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2643         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2644                     ns->mac_local_faults);
2645         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2646                     ns->mac_remote_faults);
2647         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2648                     ns->rx_length_errors);
2649         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2650         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2651         for (i = 0; i < 8; i++) {
2652                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2653                                 i, ns->priority_xon_rx[i]);
2654                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2655                                 i, ns->priority_xoff_rx[i]);
2656         }
2657         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2658         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2659         for (i = 0; i < 8; i++) {
2660                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2661                                 i, ns->priority_xon_tx[i]);
2662                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2663                                 i, ns->priority_xoff_tx[i]);
2664                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2665                                 i, ns->priority_xon_2_xoff[i]);
2666         }
2667         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2668         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2669         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2670         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2671         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2672         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2673         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2674         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2675         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2676         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2677         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2678         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2679         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2680         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2681         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2682         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2683         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2684         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2685         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2686                         ns->mac_short_packet_dropped);
2687         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2688                     ns->checksum_error);
2689         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2690         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2691 }
2692
2693 /* Reset the statistics */
2694 static void
2695 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2696 {
2697         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2698         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2699
2700         /* Mark PF and VSI stats to update the offset, aka "reset" */
2701         pf->offset_loaded = false;
2702         if (pf->main_vsi)
2703                 pf->main_vsi->offset_loaded = false;
2704
2705         /* read the stats, reading current register values into offset */
2706         i40e_read_stats_registers(pf, hw);
2707 }
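/*
 * Illustrative sketch, not part of the driver: the "reset" above only
 * re-latches the current counter values as offsets.  A helper in the style
 * of i40e_stat_update_32() then reports the delta since that baseline and
 * handles 32-bit counter wrap.  Hypothetical, simplified version:
 */
#if 0
static uint64_t
example_stat_delta_32(uint32_t hw_count, bool offset_loaded,
		      uint64_t *offset, uint64_t *stat)
{
	if (!offset_loaded)
		*offset = hw_count;	/* first read becomes the baseline */
	if (hw_count >= *offset)
		*stat = (uint64_t)hw_count - *offset;
	else				/* the 32-bit register wrapped */
		*stat = ((uint64_t)hw_count + (1ULL << 32)) - *offset;
	return *stat;
}
#endif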
2708
2709 static uint32_t
2710 i40e_xstats_calc_num(void)
2711 {
2712         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2713                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2714                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2715 }
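/*
 * Illustrative sketch, not part of the driver: an application normally
 * sizes its buffers with a first "count only" call before fetching the
 * extended stats.  Exact ethdev prototypes vary between DPDK releases;
 * treat this as hedged pseudo-usage, with port_id assumed valid.
 */
#if 0
	struct rte_eth_xstat xs[256];
	struct rte_eth_xstat_name names[256];
	int nb = rte_eth_xstats_get(port_id, NULL, 0);	/* count only */

	if (nb > 0 && nb <= 256) {
		rte_eth_xstats_get_names(port_id, names, nb);
		rte_eth_xstats_get(port_id, xs, nb);
		for (int i = 0; i < nb; i++)
			printf("%s: %" PRIu64 "\n",
			       names[xs[i].id].name, xs[i].value);
	}
#endif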
2716
2717 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2718                                      struct rte_eth_xstat_name *xstats_names,
2719                                      __rte_unused unsigned limit)
2720 {
2721         unsigned count = 0;
2722         unsigned i, prio;
2723
2724         if (xstats_names == NULL)
2725                 return i40e_xstats_calc_num();
2726
2727         /* Note: limit checked in rte_eth_xstats_get_names() */
2728
2729         /* Get stats from i40e_eth_stats struct */
2730         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2731                 snprintf(xstats_names[count].name,
2732                          sizeof(xstats_names[count].name),
2733                          "%s", rte_i40e_stats_strings[i].name);
2734                 count++;
2735         }
2736
2737         /* Get individual stats from i40e_hw_port struct */
2738         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2739                 snprintf(xstats_names[count].name,
2740                         sizeof(xstats_names[count].name),
2741                          "%s", rte_i40e_hw_port_strings[i].name);
2742                 count++;
2743         }
2744
2745         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2746                 for (prio = 0; prio < 8; prio++) {
2747                         snprintf(xstats_names[count].name,
2748                                  sizeof(xstats_names[count].name),
2749                                  "rx_priority%u_%s", prio,
2750                                  rte_i40e_rxq_prio_strings[i].name);
2751                         count++;
2752                 }
2753         }
2754
2755         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2756                 for (prio = 0; prio < 8; prio++) {
2757                         snprintf(xstats_names[count].name,
2758                                  sizeof(xstats_names[count].name),
2759                                  "tx_priority%u_%s", prio,
2760                                  rte_i40e_txq_prio_strings[i].name);
2761                         count++;
2762                 }
2763         }
2764         return count;
2765 }
2766
2767 static int
2768 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2769                     unsigned n)
2770 {
2771         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2772         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2773         unsigned i, count, prio;
2774         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2775
2776         count = i40e_xstats_calc_num();
2777         if (n < count)
2778                 return count;
2779
2780         i40e_read_stats_registers(pf, hw);
2781
2782         if (xstats == NULL)
2783                 return 0;
2784
2785         count = 0;
2786
2787         /* Get stats from i40e_eth_stats struct */
2788         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2789                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2790                         rte_i40e_stats_strings[i].offset);
2791                 xstats[count].id = count;
2792                 count++;
2793         }
2794
2795         /* Get individual stats from i40e_hw_port struct */
2796         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2797                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2798                         rte_i40e_hw_port_strings[i].offset);
2799                 xstats[count].id = count;
2800                 count++;
2801         }
2802
2803         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2804                 for (prio = 0; prio < 8; prio++) {
2805                         xstats[count].value =
2806                                 *(uint64_t *)(((char *)hw_stats) +
2807                                 rte_i40e_rxq_prio_strings[i].offset +
2808                                 (sizeof(uint64_t) * prio));
2809                         xstats[count].id = count;
2810                         count++;
2811                 }
2812         }
2813
2814         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2815                 for (prio = 0; prio < 8; prio++) {
2816                         xstats[count].value =
2817                                 *(uint64_t *)(((char *)hw_stats) +
2818                                 rte_i40e_txq_prio_strings[i].offset +
2819                                 (sizeof(uint64_t) * prio));
2820                         xstats[count].id = count;
2821                         count++;
2822                 }
2823         }
2824
2825         return count;
2826 }
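/*
 * Illustrative sketch, not part of the driver: the loops above walk
 * {name, offset} tables (rte_i40e_stats_strings[] and friends) and fetch
 * each value by adding the recorded offset to the base of the stats
 * struct.  Minimal version of the same technique, with hypothetical names
 * (needs <stddef.h> for offsetof):
 */
#if 0
struct example_xstat_desc {
	const char *name;
	unsigned int offset;
};

static const struct example_xstat_desc example_port_descs[] = {
	{"rx_undersize", offsetof(struct i40e_hw_port_stats, rx_undersize)},
	{"rx_jabber",    offsetof(struct i40e_hw_port_stats, rx_jabber)},
};

static uint64_t
example_read_xstat(const struct i40e_hw_port_stats *stats, unsigned int i)
{
	return *(const uint64_t *)((const char *)stats +
				   example_port_descs[i].offset);
}
#endif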
2827
2828 static int
2829 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
2830                                  __rte_unused uint16_t queue_id,
2831                                  __rte_unused uint8_t stat_idx,
2832                                  __rte_unused uint8_t is_rx)
2833 {
2834         PMD_INIT_FUNC_TRACE();
2835
2836         return -ENOSYS;
2837 }
2838
2839 static int
2840 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2841 {
2842         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2843         u32 full_ver;
2844         u8 ver, patch;
2845         u16 build;
2846         int ret;
2847
2848         full_ver = hw->nvm.oem_ver;
2849         ver = (u8)(full_ver >> 24);
2850         build = (u16)((full_ver >> 8) & 0xffff);
2851         patch = (u8)(full_ver & 0xff);
2852
2853         ret = snprintf(fw_version, fw_size,
2854                  "%d.%d%d 0x%08x %d.%d.%d",
2855                  ((hw->nvm.version >> 12) & 0xf),
2856                  ((hw->nvm.version >> 4) & 0xff),
2857                  (hw->nvm.version & 0xf), hw->nvm.eetrack,
2858                  ver, build, patch);
2859
2860         ret += 1; /* add the size of '\0' */
2861         if (fw_size < (u32)ret)
2862                 return ret;
2863         else
2864                 return 0;
2865 }
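/*
 * Illustrative sketch, not part of the driver: the return convention above
 * (0 on success, otherwise the length needed including the trailing NUL)
 * lets callers of rte_eth_dev_fw_version_get() retry with a larger buffer.
 * Hedged usage example, port_id assumed valid:
 */
#if 0
	char fw[32];
	int need = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));

	if (need > (int)sizeof(fw))
		printf("fw version string needs %d bytes\n", need);
	else if (need == 0)
		printf("fw version: %s\n", fw);
#endif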
2866
2867 static void
2868 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2869 {
2870         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2871         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2872         struct i40e_vsi *vsi = pf->main_vsi;
2873         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2874
2875         dev_info->pci_dev = pci_dev;
2876         dev_info->max_rx_queues = vsi->nb_qps;
2877         dev_info->max_tx_queues = vsi->nb_qps;
2878         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2879         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2880         dev_info->max_mac_addrs = vsi->max_macaddrs;
2881         dev_info->max_vfs = pci_dev->max_vfs;
2882         dev_info->rx_offload_capa =
2883                 DEV_RX_OFFLOAD_VLAN_STRIP |
2884                 DEV_RX_OFFLOAD_QINQ_STRIP |
2885                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2886                 DEV_RX_OFFLOAD_UDP_CKSUM |
2887                 DEV_RX_OFFLOAD_TCP_CKSUM;
2888         dev_info->tx_offload_capa =
2889                 DEV_TX_OFFLOAD_VLAN_INSERT |
2890                 DEV_TX_OFFLOAD_QINQ_INSERT |
2891                 DEV_TX_OFFLOAD_IPV4_CKSUM |
2892                 DEV_TX_OFFLOAD_UDP_CKSUM |
2893                 DEV_TX_OFFLOAD_TCP_CKSUM |
2894                 DEV_TX_OFFLOAD_SCTP_CKSUM |
2895                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2896                 DEV_TX_OFFLOAD_TCP_TSO |
2897                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
2898                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
2899                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
2900                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
2901         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
2902                                                 sizeof(uint32_t);
2903         dev_info->reta_size = pf->hash_lut_size;
2904         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2905
2906         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2907                 .rx_thresh = {
2908                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
2909                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
2910                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
2911                 },
2912                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2913                 .rx_drop_en = 0,
2914         };
2915
2916         dev_info->default_txconf = (struct rte_eth_txconf) {
2917                 .tx_thresh = {
2918                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
2919                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
2920                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
2921                 },
2922                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2923                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2924                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2925                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2926         };
2927
2928         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2929                 .nb_max = I40E_MAX_RING_DESC,
2930                 .nb_min = I40E_MIN_RING_DESC,
2931                 .nb_align = I40E_ALIGN_RING_DESC,
2932         };
2933
2934         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2935                 .nb_max = I40E_MAX_RING_DESC,
2936                 .nb_min = I40E_MIN_RING_DESC,
2937                 .nb_align = I40E_ALIGN_RING_DESC,
2938                 .nb_seg_max = I40E_TX_MAX_SEG,
2939                 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
2940         };
2941
2942         if (pf->flags & I40E_FLAG_VMDQ) {
2943                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
2944                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
2945                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
2946                                                 pf->max_nb_vmdq_vsi;
2947                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
2948                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
2949                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
2950         }
2951
2952         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
2953                 /* For XL710 */
2954                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
2955         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
2956                 /* For XXV710 */
2957                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
2958         else
2959                 /* For X710 */
2960                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2961 }
2962
2963 static int
2964 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2965 {
2966         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2967         struct i40e_vsi *vsi = pf->main_vsi;
2968         PMD_INIT_FUNC_TRACE();
2969
2970         if (on)
2971                 return i40e_vsi_add_vlan(vsi, vlan_id);
2972         else
2973                 return i40e_vsi_delete_vlan(vsi, vlan_id);
2974 }
2975
2976 static int
2977 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
2978                                 enum rte_vlan_type vlan_type,
2979                                 uint16_t tpid, int qinq)
2980 {
2981         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2982         uint64_t reg_r = 0;
2983         uint64_t reg_w = 0;
2984         uint16_t reg_id = 3;
2985         int ret;
2986
2987         if (qinq) {
2988                 if (vlan_type == ETH_VLAN_TYPE_OUTER)
2989                         reg_id = 2;
2990         }
2991
2992         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2993                                           &reg_r, NULL);
2994         if (ret != I40E_SUCCESS) {
2995                 PMD_DRV_LOG(ERR,
2996                            "Failed to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
2997                            reg_id);
2998                 return -EIO;
2999         }
3000         PMD_DRV_LOG(DEBUG,
3001                     "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3002                     reg_id, reg_r);
3003
3004         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3005         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3006         if (reg_r == reg_w) {
3007                 PMD_DRV_LOG(DEBUG, "No need to write");
3008                 return 0;
3009         }
3010
3011         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3012                                            reg_w, NULL);
3013         if (ret != I40E_SUCCESS) {
3014                 PMD_DRV_LOG(ERR,
3015                             "Failed to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3016                             reg_id);
3017                 return -EIO;
3018         }
3019         PMD_DRV_LOG(DEBUG,
3020                     "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
3021                     reg_w, reg_id);
3022
3023         return 0;
3024 }
3025
3026 static int
3027 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3028                    enum rte_vlan_type vlan_type,
3029                    uint16_t tpid)
3030 {
3031         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3032         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
3033         int ret = 0;
3034
3035         if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3036              vlan_type != ETH_VLAN_TYPE_OUTER) ||
3037             (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3038                 PMD_DRV_LOG(ERR,
3039                             "Unsupported vlan type.");
3040                 return -EINVAL;
3041         }
3042         /* Support for 802.1ad frames is added in NVM API 1.7 */
3043         if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3044                 if (qinq) {
3045                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3046                                 hw->first_tag = rte_cpu_to_le_16(tpid);
3047                         else if (vlan_type == ETH_VLAN_TYPE_INNER)
3048                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3049                 } else {
3050                         if (vlan_type == ETH_VLAN_TYPE_OUTER)
3051                                 hw->second_tag = rte_cpu_to_le_16(tpid);
3052                 }
3053                 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3054                 if (ret != I40E_SUCCESS) {
3055                         PMD_DRV_LOG(ERR,
3056                                     "Set switch config failed aq_err: %d",
3057                                     hw->aq.asq_last_status);
3058                         ret = -EIO;
3059                 }
3060         } else
3061                 /* If NVM API < 1.7, keep the register setting */
3062                 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3063                                                       tpid, qinq);
3064
3065         return ret;
3066 }
3067
3068 static void
3069 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3070 {
3071         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3072         struct i40e_vsi *vsi = pf->main_vsi;
3073
3074         if (mask & ETH_VLAN_FILTER_MASK) {
3075                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3076                         i40e_vsi_config_vlan_filter(vsi, TRUE);
3077                 else
3078                         i40e_vsi_config_vlan_filter(vsi, FALSE);
3079         }
3080
3081         if (mask & ETH_VLAN_STRIP_MASK) {
3082                 /* Enable or disable VLAN stripping */
3083                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
3084                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
3085                 else
3086                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
3087         }
3088
3089         if (mask & ETH_VLAN_EXTEND_MASK) {
3090                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
3091                         i40e_vsi_config_double_vlan(vsi, TRUE);
3092                         /* Set global registers with default ethertype. */
3093                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3094                                            ETHER_TYPE_VLAN);
3095                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3096                                            ETHER_TYPE_VLAN);
3097                 }
3098                 else
3099                         i40e_vsi_config_double_vlan(vsi, FALSE);
3100         }
3101 }
3102
3103 static void
3104 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3105                           __rte_unused uint16_t queue,
3106                           __rte_unused int on)
3107 {
3108         PMD_INIT_FUNC_TRACE();
3109 }
3110
3111 static int
3112 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3113 {
3114         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3115         struct i40e_vsi *vsi = pf->main_vsi;
3116         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3117         struct i40e_vsi_vlan_pvid_info info;
3118
3119         memset(&info, 0, sizeof(info));
3120         info.on = on;
3121         if (info.on)
3122                 info.config.pvid = pvid;
3123         else {
3124                 info.config.reject.tagged =
3125                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
3126                 info.config.reject.untagged =
3127                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
3128         }
3129
3130         return i40e_vsi_vlan_pvid_set(vsi, &info);
3131 }
3132
3133 static int
3134 i40e_dev_led_on(struct rte_eth_dev *dev)
3135 {
3136         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3137         uint32_t mode = i40e_led_get(hw);
3138
3139         if (mode == 0)
3140                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
3141
3142         return 0;
3143 }
3144
3145 static int
3146 i40e_dev_led_off(struct rte_eth_dev *dev)
3147 {
3148         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3149         uint32_t mode = i40e_led_get(hw);
3150
3151         if (mode != 0)
3152                 i40e_led_set(hw, 0, false);
3153
3154         return 0;
3155 }
3156
3157 static int
3158 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3159 {
3160         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3161         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3162
3163         fc_conf->pause_time = pf->fc_conf.pause_time;
3164         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3165         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3166
3167         /* Return current mode according to actual setting */
3168         switch (hw->fc.current_mode) {
3169         case I40E_FC_FULL:
3170                 fc_conf->mode = RTE_FC_FULL;
3171                 break;
3172         case I40E_FC_TX_PAUSE:
3173                 fc_conf->mode = RTE_FC_TX_PAUSE;
3174                 break;
3175         case I40E_FC_RX_PAUSE:
3176                 fc_conf->mode = RTE_FC_RX_PAUSE;
3177                 break;
3178         case I40E_FC_NONE:
3179         default:
3180                 fc_conf->mode = RTE_FC_NONE;
3181         };
3182
3183         return 0;
3184 }
3185
3186 static int
3187 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3188 {
3189         uint32_t mflcn_reg, fctrl_reg, reg;
3190         uint32_t max_high_water;
3191         uint8_t i, aq_failure;
3192         int err;
3193         struct i40e_hw *hw;
3194         struct i40e_pf *pf;
3195         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3196                 [RTE_FC_NONE] = I40E_FC_NONE,
3197                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3198                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3199                 [RTE_FC_FULL] = I40E_FC_FULL
3200         };
3201
3202         /* The high_water field in rte_eth_fc_conf uses kilobytes as its unit */
3203
3204         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3205         if ((fc_conf->high_water > max_high_water) ||
3206                         (fc_conf->high_water < fc_conf->low_water)) {
3207                 PMD_INIT_LOG(ERR,
3208                         "Invalid high/low water setup value in KB, High_water must be <= %d.",
3209                         max_high_water);
3210                 return -EINVAL;
3211         }
3212
3213         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3214         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3215         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3216
3217         pf->fc_conf.pause_time = fc_conf->pause_time;
3218         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3219         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3220
3221         PMD_INIT_FUNC_TRACE();
3222
3223         /* All the link flow control related enable/disable register
3224          * configuration is handled by the F/W
3225          */
3226         err = i40e_set_fc(hw, &aq_failure, true);
3227         if (err < 0)
3228                 return -ENOSYS;
3229
3230         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3231                 /* Configure flow control refresh threshold,
3232                  * the value for stat_tx_pause_refresh_timer[8]
3233                  * is used for global pause operation.
3234                  */
3235
3236                 I40E_WRITE_REG(hw,
3237                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3238                                pf->fc_conf.pause_time);
3239
3240                 /* configure the timer value included in transmitted pause
3241                  * frame,
3242                  * the value for stat_tx_pause_quanta[8] is used for global
3243                  * pause operation
3244                  */
3245                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3246                                pf->fc_conf.pause_time);
3247
3248                 fctrl_reg = I40E_READ_REG(hw,
3249                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3250
3251                 if (fc_conf->mac_ctrl_frame_fwd != 0)
3252                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3253                 else
3254                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3255
3256                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3257                                fctrl_reg);
3258         } else {
3259                 /* Configure pause time (2 TCs per register) */
3260                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3261                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3262                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3263
3264                 /* Configure flow control refresh threshold value */
3265                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3266                                pf->fc_conf.pause_time / 2);
3267
3268                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3269
3270                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
3271          * depending on configuration
3272                  */
3273                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3274                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3275                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3276                 } else {
3277                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3278                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3279                 }
3280
3281                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3282         }
3283
3284         /* Configure the watermarks, based on both packets and bytes */
3285         I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
3286                        (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3287                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3288         I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
3289                        (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3290                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3291         I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
3292                        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3293                        << I40E_KILOSHIFT);
3294         I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
3295                        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3296                        << I40E_KILOSHIFT);
3297
3298         I40E_WRITE_FLUSH(hw);
3299
3300         return 0;
3301 }
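/*
 * Illustrative note, not part of the driver: the watermark writes above
 * convert the kilobyte values from rte_eth_fc_conf into bytes with
 * "<< I40E_KILOSHIFT" (i.e. * 1024), and the packet-based registers then
 * divide by the assumed average packet size.  For example, with a 3 KB
 * high water mark:
 *
 *   bytes   = 3 << I40E_KILOSHIFT = 3072
 *   packets = 3072 / I40E_PACKET_AVERAGE_SIZE
 *
 * (I40E_PACKET_AVERAGE_SIZE is defined elsewhere in this driver; 3 KB is
 * only an example input.)
 */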
3302
3303 static int
3304 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3305                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3306 {
3307         PMD_INIT_FUNC_TRACE();
3308
3309         return -ENOSYS;
3310 }
3311
3312 /* Add a MAC address, and update filters */
3313 static int
3314 i40e_macaddr_add(struct rte_eth_dev *dev,
3315                  struct ether_addr *mac_addr,
3316                  __rte_unused uint32_t index,
3317                  uint32_t pool)
3318 {
3319         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3320         struct i40e_mac_filter_info mac_filter;
3321         struct i40e_vsi *vsi;
3322         int ret;
3323
3324         /* If VMDQ not enabled or configured, return */
3325         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3326                           !pf->nb_cfg_vmdq_vsi)) {
3327                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3328                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3329                         pool);
3330                 return -ENOTSUP;
3331         }
3332
3333         if (pool > pf->nb_cfg_vmdq_vsi) {
3334                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3335                                 pool, pf->nb_cfg_vmdq_vsi);
3336                 return -EINVAL;
3337         }
3338
3339         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3340         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3341                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3342         else
3343                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3344
3345         if (pool == 0)
3346                 vsi = pf->main_vsi;
3347         else
3348                 vsi = pf->vmdq[pool - 1].vsi;
3349
3350         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3351         if (ret != I40E_SUCCESS) {
3352                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3353                 return -ENODEV;
3354         }
3355         return 0;
3356 }
3357
3358 /* Remove a MAC address, and update filters */
3359 static void
3360 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3361 {
3362         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3363         struct i40e_vsi *vsi;
3364         struct rte_eth_dev_data *data = dev->data;
3365         struct ether_addr *macaddr;
3366         int ret;
3367         uint32_t i;
3368         uint64_t pool_sel;
3369
3370         macaddr = &(data->mac_addrs[index]);
3371
3372         pool_sel = dev->data->mac_pool_sel[index];
3373
3374         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3375                 if (pool_sel & (1ULL << i)) {
3376                         if (i == 0)
3377                                 vsi = pf->main_vsi;
3378                         else {
3379                                 /* No VMDQ pool enabled or configured */
3380                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3381                                         (i > pf->nb_cfg_vmdq_vsi)) {
3382                                         PMD_DRV_LOG(ERR,
3383                                                 "No VMDQ pool enabled/configured");
3384                                         return;
3385                                 }
3386                                 vsi = pf->vmdq[i - 1].vsi;
3387                         }
3388                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3389
3390                         if (ret) {
3391                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3392                                 return;
3393                         }
3394                 }
3395         }
3396 }
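/*
 * Illustrative note, not part of the driver: mac_pool_sel[index] is a
 * per-address bitmap of the pools the address was added to (bit 0 = main
 * VSI, bit n = VMDq pool n), which is why the removal loop above walks all
 * 64 bits.  For example, pool_sel == 0x5 (bits 0 and 2) means the address
 * has to be removed from the main VSI and from VMDq pool 2
 * (pf->vmdq[1].vsi).
 */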
3397
3398 /* Set perfect match or hash match of MAC and VLAN for a VF */
3399 static int
3400 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3401                  struct rte_eth_mac_filter *filter,
3402                  bool add)
3403 {
3404         struct i40e_hw *hw;
3405         struct i40e_mac_filter_info mac_filter;
3406         struct ether_addr old_mac;
3407         struct ether_addr *new_mac;
3408         struct i40e_pf_vf *vf = NULL;
3409         uint16_t vf_id;
3410         int ret;
3411
3412         if (pf == NULL) {
3413                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3414                 return -EINVAL;
3415         }
3416         hw = I40E_PF_TO_HW(pf);
3417
3418         if (filter == NULL) {
3419                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3420                 return -EINVAL;
3421         }
3422
3423         new_mac = &filter->mac_addr;
3424
3425         if (is_zero_ether_addr(new_mac)) {
3426                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3427                 return -EINVAL;
3428         }
3429
3430         vf_id = filter->dst_id;
3431
3432         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3433                 PMD_DRV_LOG(ERR, "Invalid argument.");
3434                 return -EINVAL;
3435         }
3436         vf = &pf->vfs[vf_id];
3437
3438         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3439                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3440                 return -EINVAL;
3441         }
3442
3443         if (add) {
3444                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3445                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3446                                 ETHER_ADDR_LEN);
3447                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3448                                  ETHER_ADDR_LEN);
3449
3450                 mac_filter.filter_type = filter->filter_type;
3451                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3452                 if (ret != I40E_SUCCESS) {
3453                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3454                         return -1;
3455                 }
3456                 ether_addr_copy(new_mac, &pf->dev_addr);
3457         } else {
3458                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3459                                 ETHER_ADDR_LEN);
3460                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3461                 if (ret != I40E_SUCCESS) {
3462                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3463                         return -1;
3464                 }
3465
3466                 /* Clear device address as it has been removed */
3467                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3468                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3469         }
3470
3471         return 0;
3472 }
3473
3474 /* MAC filter handle */
3475 static int
3476 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3477                 void *arg)
3478 {
3479         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3480         struct rte_eth_mac_filter *filter;
3481         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3482         int ret = I40E_NOT_SUPPORTED;
3483
3484         filter = (struct rte_eth_mac_filter *)(arg);
3485
3486         switch (filter_op) {
3487         case RTE_ETH_FILTER_NOP:
3488                 ret = I40E_SUCCESS;
3489                 break;
3490         case RTE_ETH_FILTER_ADD:
3491                 i40e_pf_disable_irq0(hw);
3492                 if (filter->is_vf)
3493                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3494                 i40e_pf_enable_irq0(hw);
3495                 break;
3496         case RTE_ETH_FILTER_DELETE:
3497                 i40e_pf_disable_irq0(hw);
3498                 if (filter->is_vf)
3499                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3500                 i40e_pf_enable_irq0(hw);
3501                 break;
3502         default:
3503                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3504                 ret = I40E_ERR_PARAM;
3505                 break;
3506         }
3507
3508         return ret;
3509 }
3510
3511 static int
3512 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3513 {
3514         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3515         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3516         int ret;
3517
3518         if (!lut)
3519                 return -EINVAL;
3520
3521         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3522                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3523                                           lut, lut_size);
3524                 if (ret) {
3525                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3526                         return ret;
3527                 }
3528         } else {
3529                 uint32_t *lut_dw = (uint32_t *)lut;
3530                 uint16_t i, lut_size_dw = lut_size / 4;
3531
3532                 for (i = 0; i < lut_size_dw; i++)
3533                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
3534         }
3535
3536         return 0;
3537 }
3538
3539 static int
3540 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3541 {
3542         struct i40e_pf *pf;
3543         struct i40e_hw *hw;
3544         int ret;
3545
3546         if (!vsi || !lut)
3547                 return -EINVAL;
3548
3549         pf = I40E_VSI_TO_PF(vsi);
3550         hw = I40E_VSI_TO_HW(vsi);
3551
3552         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3553                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3554                                           lut, lut_size);
3555                 if (ret) {
3556                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3557                         return ret;
3558                 }
3559         } else {
3560                 uint32_t *lut_dw = (uint32_t *)lut;
3561                 uint16_t i, lut_size_dw = lut_size / 4;
3562
3563                 for (i = 0; i < lut_size_dw; i++)
3564                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
3565                 I40E_WRITE_FLUSH(hw);
3566         }
3567
3568         return 0;
3569 }
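/*
 * Illustrative sketch, not part of the driver: in the register fallback
 * paths above, the byte-wide LUT is reinterpreted as 32-bit words, so each
 * I40E_PFQF_HLUT(i) register carries four consecutive LUT entries.  A
 * per-byte view of the same layout (hypothetical helper, little-endian
 * host assumed):
 */
#if 0
static uint8_t
example_lut_entry(const uint32_t *hlut_words, uint16_t idx)
{
	return (uint8_t)(hlut_words[idx / 4] >> ((idx % 4) * 8));
}
#endif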
3570
3571 static int
3572 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3573                          struct rte_eth_rss_reta_entry64 *reta_conf,
3574                          uint16_t reta_size)
3575 {
3576         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3577         uint16_t i, lut_size = pf->hash_lut_size;
3578         uint16_t idx, shift;
3579         uint8_t *lut;
3580         int ret;
3581
3582         if (reta_size != lut_size ||
3583                 reta_size > ETH_RSS_RETA_SIZE_512) {
3584                 PMD_DRV_LOG(ERR,
3585                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
3586                         reta_size, lut_size);
3587                 return -EINVAL;
3588         }
3589
3590         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3591         if (!lut) {
3592                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3593                 return -ENOMEM;
3594         }
3595         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3596         if (ret)
3597                 goto out;
3598         for (i = 0; i < reta_size; i++) {
3599                 idx = i / RTE_RETA_GROUP_SIZE;
3600                 shift = i % RTE_RETA_GROUP_SIZE;
3601                 if (reta_conf[idx].mask & (1ULL << shift))
3602                         lut[i] = reta_conf[idx].reta[shift];
3603         }
3604         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3605
3606 out:
3607         rte_free(lut);
3608
3609         return ret;
3610 }
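/*
 * Illustrative note, not part of the driver: RETA entries are grouped 64
 * per rte_eth_rss_reta_entry64, so entry i lives at
 * reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] and is
 * only applied when the matching mask bit is set.  For example, for
 * i = 130: idx = 2, shift = 2, gated by reta_conf[2].mask & (1ULL << 2).
 */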
3611
3612 static int
3613 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3614                         struct rte_eth_rss_reta_entry64 *reta_conf,
3615                         uint16_t reta_size)
3616 {
3617         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3618         uint16_t i, lut_size = pf->hash_lut_size;
3619         uint16_t idx, shift;
3620         uint8_t *lut;
3621         int ret;
3622
3623         if (reta_size != lut_size ||
3624                 reta_size > ETH_RSS_RETA_SIZE_512) {
3625                 PMD_DRV_LOG(ERR,
3626                         "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
3627                         reta_size, lut_size);
3628                 return -EINVAL;
3629         }
3630
3631         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3632         if (!lut) {
3633                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3634                 return -ENOMEM;
3635         }
3636
3637         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3638         if (ret)
3639                 goto out;
3640         for (i = 0; i < reta_size; i++) {
3641                 idx = i / RTE_RETA_GROUP_SIZE;
3642                 shift = i % RTE_RETA_GROUP_SIZE;
3643                 if (reta_conf[idx].mask & (1ULL << shift))
3644                         reta_conf[idx].reta[shift] = lut[i];
3645         }
3646
3647 out:
3648         rte_free(lut);
3649
3650         return ret;
3651 }
3652
3653 /**
3654  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3655  * @hw:   pointer to the HW structure
3656  * @mem:  pointer to mem struct to fill out
3657  * @size: size of memory requested
3658  * @alignment: what to align the allocation to
3659  **/
3660 enum i40e_status_code
3661 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3662                         struct i40e_dma_mem *mem,
3663                         u64 size,
3664                         u32 alignment)
3665 {
3666         const struct rte_memzone *mz = NULL;
3667         char z_name[RTE_MEMZONE_NAMESIZE];
3668
3669         if (!mem)
3670                 return I40E_ERR_PARAM;
3671
3672         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3673         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3674                                          alignment, RTE_PGSIZE_2M);
3675         if (!mz)
3676                 return I40E_ERR_NO_MEMORY;
3677
3678         mem->size = size;
3679         mem->va = mz->addr;
3680         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
3681         mem->zone = (const void *)mz;
3682         PMD_DRV_LOG(DEBUG,
3683                 "memzone %s allocated with physical address: %"PRIu64,
3684                 mz->name, mem->pa);
3685
3686         return I40E_SUCCESS;
3687 }
3688
3689 /**
3690  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3691  * @hw:   pointer to the HW structure
3692  * @mem:  ptr to mem struct to free
3693  **/
3694 enum i40e_status_code
3695 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3696                     struct i40e_dma_mem *mem)
3697 {
3698         if (!mem)
3699                 return I40E_ERR_PARAM;
3700
3701         PMD_DRV_LOG(DEBUG,
3702                 "memzone %s to be freed with physical address: %"PRIu64,
3703                 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
3704         rte_memzone_free((const struct rte_memzone *)mem->zone);
3705         mem->zone = NULL;
3706         mem->va = NULL;
3707         mem->pa = (u64)0;
3708
3709         return I40E_SUCCESS;
3710 }
3711
3712 /**
3713  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3714  * @hw:   pointer to the HW structure
3715  * @mem:  pointer to mem struct to fill out
3716  * @size: size of memory requested
3717  **/
3718 enum i40e_status_code
3719 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3720                          struct i40e_virt_mem *mem,
3721                          u32 size)
3722 {
3723         if (!mem)
3724                 return I40E_ERR_PARAM;
3725
3726         mem->size = size;
3727         mem->va = rte_zmalloc("i40e", size, 0);
3728
3729         if (mem->va)
3730                 return I40E_SUCCESS;
3731         else
3732                 return I40E_ERR_NO_MEMORY;
3733 }
3734
3735 /**
3736  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3737  * @hw:   pointer to the HW structure
3738  * @mem:  pointer to mem struct to free
3739  **/
3740 enum i40e_status_code
3741 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3742                      struct i40e_virt_mem *mem)
3743 {
3744         if (!mem)
3745                 return I40E_ERR_PARAM;
3746
3747         rte_free(mem->va);
3748         mem->va = NULL;
3749
3750         return I40E_SUCCESS;
3751 }
3752
3753 void
3754 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3755 {
3756         rte_spinlock_init(&sp->spinlock);
3757 }
3758
3759 void
3760 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3761 {
3762         rte_spinlock_lock(&sp->spinlock);
3763 }
3764
3765 void
3766 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3767 {
3768         rte_spinlock_unlock(&sp->spinlock);
3769 }
3770
3771 void
3772 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3773 {
3774         return;
3775 }
3776
3777 /**
3778  * Get the hardware capabilities, which will be parsed
3779  * and saved into struct i40e_hw.
3780  */
3781 static int
3782 i40e_get_cap(struct i40e_hw *hw)
3783 {
3784         struct i40e_aqc_list_capabilities_element_resp *buf;
3785         uint16_t len, size = 0;
3786         int ret;
3787
3788         /* Calculate a buffer large enough to hold the response data temporarily */
3789         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3790                                                 I40E_MAX_CAP_ELE_NUM;
3791         buf = rte_zmalloc("i40e", len, 0);
3792         if (!buf) {
3793                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3794                 return I40E_ERR_NO_MEMORY;
3795         }
3796
3797         /* Get and parse the capabilities, then save them to hw */
3798         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3799                         i40e_aqc_opc_list_func_capabilities, NULL);
3800         if (ret != I40E_SUCCESS)
3801                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3802
3803         /* Free the temporary buffer after being used */
3804         rte_free(buf);
3805
3806         return ret;
3807 }
3808
3809 static int
3810 i40e_pf_parameter_init(struct rte_eth_dev *dev)
3811 {
3812         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3813         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3814         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3815         uint16_t qp_count = 0, vsi_count = 0;
3816
3817         if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
3818                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
3819                 return -EINVAL;
3820         }
3821         /* Add the parameter init for LFC */
3822         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
3823         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
3824         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
3825
3826         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
3827         pf->max_num_vsi = hw->func_caps.num_vsis;
3828         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
3829         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
3830         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3831
3832         /* FDir queue/VSI allocation */
3833         pf->fdir_qp_offset = 0;
3834         if (hw->func_caps.fd) {
3835                 pf->flags |= I40E_FLAG_FDIR;
3836                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
3837         } else {
3838                 pf->fdir_nb_qps = 0;
3839         }
3840         qp_count += pf->fdir_nb_qps;
3841         vsi_count += 1;
3842
3843         /* LAN queue/VSI allocation */
3844         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
3845         if (!hw->func_caps.rss) {
3846                 pf->lan_nb_qps = 1;
3847         } else {
3848                 pf->flags |= I40E_FLAG_RSS;
3849                 if (hw->mac.type == I40E_MAC_X722)
3850                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
3851                 pf->lan_nb_qps = pf->lan_nb_qp_max;
3852         }
3853         qp_count += pf->lan_nb_qps;
3854         vsi_count += 1;
3855
3856         /* VF queue/VSI allocation */
3857         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
3858         if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
3859                 pf->flags |= I40E_FLAG_SRIOV;
3860                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3861                 pf->vf_num = pci_dev->max_vfs;
3862                 PMD_DRV_LOG(DEBUG,
3863                         "%u VF VSIs, %u queues per VF VSI, in total %u queues",
3864                         pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
3865         } else {
3866                 pf->vf_nb_qps = 0;
3867                 pf->vf_num = 0;
3868         }
3869         qp_count += pf->vf_nb_qps * pf->vf_num;
3870         vsi_count += pf->vf_num;
3871
3872         /* VMDq queue/VSI allocation */
3873         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
3874         pf->vmdq_nb_qps = 0;
3875         pf->max_nb_vmdq_vsi = 0;
3876         if (hw->func_caps.vmdq) {
3877                 if (qp_count < hw->func_caps.num_tx_qp &&
3878                         vsi_count < hw->func_caps.num_vsis) {
3879                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
3880                                 qp_count) / pf->vmdq_nb_qp_max;
3881
3882                         /* Limit the maximum number of VMDq vsi to the maximum
3883                          * ethdev can support
3884                          */
3885                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3886                                 hw->func_caps.num_vsis - vsi_count);
3887                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3888                                 ETH_64_POOLS);
3889                         if (pf->max_nb_vmdq_vsi) {
3890                                 pf->flags |= I40E_FLAG_VMDQ;
3891                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
3892                                 PMD_DRV_LOG(DEBUG,
3893                                         "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
3894                                         pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
3895                                         pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
3896                         } else {
3897                                 PMD_DRV_LOG(INFO,
3898                                         "Not enough queues left for VMDq");
3899                         }
3900                 } else {
3901                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
3902                 }
3903         }
3904         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
3905         vsi_count += pf->max_nb_vmdq_vsi;
3906
3907         if (hw->func_caps.dcb)
3908                 pf->flags |= I40E_FLAG_DCB;
3909
3910         if (qp_count > hw->func_caps.num_tx_qp) {
3911                 PMD_DRV_LOG(ERR,
3912                         "Failed to allocate %u queues, which exceeds the hardware maximum %u",
3913                         qp_count, hw->func_caps.num_tx_qp);
3914                 return -EINVAL;
3915         }
3916         if (vsi_count > hw->func_caps.num_vsis) {
3917                 PMD_DRV_LOG(ERR,
3918                         "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
3919                         vsi_count, hw->func_caps.num_vsis);
3920                 return -EINVAL;
3921         }
3922
3923         return 0;
3924 }
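
/*
 * Illustrative note (not part of the driver source): a worked example of the
 * queue layout computed by i40e_pf_parameter_init() above, assuming
 * hypothetical capability values of num_tx_qp = 128, FDIR enabled with
 * 1 queue, 64 LAN queues, 4 VFs with 4 queues each and vmdq_nb_qp_max = 4:
 *
 *   fdir_qp_offset = 0              fdir_nb_qps = 1
 *   lan_qp_offset  = 0 + 1  = 1     lan_nb_qps  = 64
 *   vf_qp_offset   = 1 + 64 = 65    VF queues   = 4 * 4 = 16
 *   vmdq_qp_offset = 65 + 16 = 81
 *   max_nb_vmdq_vsi = (128 - 81) / 4 = 11, then capped by the remaining
 *                     VSIs and by ETH_64_POOLS
 *
 * The final checks only verify that qp_count and vsi_count fit within
 * hw->func_caps.num_tx_qp and hw->func_caps.num_vsis.
 */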
3925
3926 static int
3927 i40e_pf_get_switch_config(struct i40e_pf *pf)
3928 {
3929         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3930         struct i40e_aqc_get_switch_config_resp *switch_config;
3931         struct i40e_aqc_switch_config_element_resp *element;
3932         uint16_t start_seid = 0, num_reported;
3933         int ret;
3934
3935         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
3936                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
3937         if (!switch_config) {
3938                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3939                 return -ENOMEM;
3940         }
3941
3942         /* Get the switch configurations */
3943         ret = i40e_aq_get_switch_config(hw, switch_config,
3944                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
3945         if (ret != I40E_SUCCESS) {
3946                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
3947                 goto fail;
3948         }
3949         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
3950         if (num_reported != 1) { /* The number should be 1 */
3951                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
3952                 goto fail;
3953         }
3954
3955         /* Parse the switch configuration elements */
3956         element = &(switch_config->element[0]);
3957         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
3958                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
3959                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
3960         } else
3961                 PMD_DRV_LOG(INFO, "Unknown element type");
3962
3963 fail:
3964         rte_free(switch_config);
3965
3966         return ret;
3967 }
3968
3969 static int
3970 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
3971                         uint32_t num)
3972 {
3973         struct pool_entry *entry;
3974
3975         if (pool == NULL || num == 0)
3976                 return -EINVAL;
3977
3978         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
3979         if (entry == NULL) {
3980                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
3981                 return -ENOMEM;
3982         }
3983
3984         /* queue heap initialize */
3985         pool->num_free = num;
3986         pool->num_alloc = 0;
3987         pool->base = base;
3988         LIST_INIT(&pool->alloc_list);
3989         LIST_INIT(&pool->free_list);
3990
3991         /* Initialize element  */
3992         entry->base = 0;
3993         entry->len = num;
3994
3995         LIST_INSERT_HEAD(&pool->free_list, entry, next);
3996         return 0;
3997 }
3998
3999 static void
4000 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4001 {
4002         struct pool_entry *entry, *next_entry;
4003
4004         if (pool == NULL)
4005                 return;
4006
4007         for (entry = LIST_FIRST(&pool->alloc_list);
4008                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4009                         entry = next_entry) {
4010                 LIST_REMOVE(entry, next);
4011                 rte_free(entry);
4012         }
4013
4014         for (entry = LIST_FIRST(&pool->free_list);
4015                         entry && (next_entry = LIST_NEXT(entry, next), 1);
4016                         entry = next_entry) {
4017                 LIST_REMOVE(entry, next);
4018                 rte_free(entry);
4019         }
4020
4021         pool->num_free = 0;
4022         pool->num_alloc = 0;
4023         pool->base = 0;
4024         LIST_INIT(&pool->alloc_list);
4025         LIST_INIT(&pool->free_list);
4026 }
4027
4028 static int
4029 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4030                        uint32_t base)
4031 {
4032         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4033         uint32_t pool_offset;
4034         int insert;
4035
4036         if (pool == NULL) {
4037                 PMD_DRV_LOG(ERR, "Invalid parameter");
4038                 return -EINVAL;
4039         }
4040
4041         pool_offset = base - pool->base;
4042         /* Lookup in alloc list */
4043         LIST_FOREACH(entry, &pool->alloc_list, next) {
4044                 if (entry->base == pool_offset) {
4045                         valid_entry = entry;
4046                         LIST_REMOVE(entry, next);
4047                         break;
4048                 }
4049         }
4050
4051         /* Not found, return */
4052         if (valid_entry == NULL) {
4053                 PMD_DRV_LOG(ERR, "Failed to find entry");
4054                 return -EINVAL;
4055         }
4056
4057         /**
4058          * Found it. Move it to the free list and try to merge.
4059          * To make merging easier, the free list is always sorted by qbase.
4060          * Find the adjacent prev and next entries.
4061          */
4062         prev = next = NULL;
4063         LIST_FOREACH(entry, &pool->free_list, next) {
4064                 if (entry->base > valid_entry->base) {
4065                         next = entry;
4066                         break;
4067                 }
4068                 prev = entry;
4069         }
4070
4071         insert = 0;
4072         /* Try to merge with the next one */
4073         if (next != NULL) {
4074                 /* Merge with next one */
4075                 if (valid_entry->base + valid_entry->len == next->base) {
4076                         next->base = valid_entry->base;
4077                         next->len += valid_entry->len;
4078                         rte_free(valid_entry);
4079                         valid_entry = next;
4080                         insert = 1;
4081                 }
4082         }
4083
4084         if (prev != NULL) {
4085                 /* Merge with previous one */
4086                 if (prev->base + prev->len == valid_entry->base) {
4087                         prev->len += valid_entry->len;
4088                         /* If it merged with the next one, remove the next node */
4089                         if (insert == 1) {
4090                                 LIST_REMOVE(valid_entry, next);
4091                                 rte_free(valid_entry);
4092                         } else {
4093                                 rte_free(valid_entry);
4094                                 insert = 1;
4095                         }
4096                 }
4097         }
4098
4099         /* No entry found to merge with, insert it */
4100         if (insert == 0) {
4101                 if (prev != NULL)
4102                         LIST_INSERT_AFTER(prev, valid_entry, next);
4103                 else if (next != NULL)
4104                         LIST_INSERT_BEFORE(next, valid_entry, next);
4105                 else /* It's empty list, insert to head */
4106                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4107         }
4108
4109         pool->num_free += valid_entry->len;
4110         pool->num_alloc -= valid_entry->len;
4111
4112         return 0;
4113 }
4114
4115 static int
4116 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4117                        uint16_t num)
4118 {
4119         struct pool_entry *entry, *valid_entry;
4120
4121         if (pool == NULL || num == 0) {
4122                 PMD_DRV_LOG(ERR, "Invalid parameter");
4123                 return -EINVAL;
4124         }
4125
4126         if (pool->num_free < num) {
4127                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4128                             num, pool->num_free);
4129                 return -ENOMEM;
4130         }
4131
4132         valid_entry = NULL;
4133         /* Look up in the free list and find the best-fit entry */
4134         LIST_FOREACH(entry, &pool->free_list, next) {
4135                 if (entry->len >= num) {
4136                         /* Find best one */
4137                         if (entry->len == num) {
4138                                 valid_entry = entry;
4139                                 break;
4140                         }
4141                         if (valid_entry == NULL || valid_entry->len > entry->len)
4142                                 valid_entry = entry;
4143                 }
4144         }
4145
4146         /* No entry found to satisfy the request, return */
4147         if (valid_entry == NULL) {
4148                 PMD_DRV_LOG(ERR, "No valid entry found");
4149                 return -ENOMEM;
4150         }
4151         /**
4152          * The entry has exactly the requested number of queues,
4153          * remove it from the free_list.
4154          */
4155         if (valid_entry->len == num) {
4156                 LIST_REMOVE(valid_entry, next);
4157         } else {
4158                 /**
4159                  * The entry has more queues than requested,
4160                  * create a new entry for the alloc_list and adjust
4161                  * the queue base and length of the free_list entry.
4162                  */
4163                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4164                 if (entry == NULL) {
4165                         PMD_DRV_LOG(ERR,
4166                                 "Failed to allocate memory for resource pool");
4167                         return -ENOMEM;
4168                 }
4169                 entry->base = valid_entry->base;
4170                 entry->len = num;
4171                 valid_entry->base += num;
4172                 valid_entry->len -= num;
4173                 valid_entry = entry;
4174         }
4175
4176         /* Insert it into alloc list, not sorted */
4177         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4178
4179         pool->num_free -= valid_entry->len;
4180         pool->num_alloc += valid_entry->len;
4181
4182         return valid_entry->base + pool->base;
4183 }
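
/*
 * Illustrative usage sketch (not part of the driver source): the resource
 * pool above is a best-fit allocator over a contiguous index range, with
 * adjacent-entry coalescing on free. A hypothetical caller, mirroring how
 * the PF queue pool is used elsewhere in this file:
 *
 *   struct i40e_res_pool_info qp_pool;
 *   int base;
 *
 *   i40e_res_pool_init(&qp_pool, 0, 128);      // indices 0..127 are free
 *   base = i40e_res_pool_alloc(&qp_pool, 16);  // returns 0 here
 *   ...
 *   i40e_res_pool_free(&qp_pool, base);        // merged back into free_list
 *   i40e_res_pool_destroy(&qp_pool);
 *
 * i40e_res_pool_alloc() returns the absolute base index (pool base plus
 * entry base) on success, or a negative error code on failure.
 */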
4184
4185 /**
4186  * bitmap_is_subset - Check whether src2 is a subset of src1
4187  **/
4188 static inline int
4189 bitmap_is_subset(uint8_t src1, uint8_t src2)
4190 {
4191         return !((src1 ^ src2) & src2);
4192 }
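
/*
 * Worked example (illustrative): with src1 = 0x7 (TC0-TC2 supported by HW)
 * and src2 = 0x5 (TC0 and TC2 requested), (src1 ^ src2) & src2 = 0x2 & 0x5
 * = 0, so the function returns 1: src2 is a subset of src1. With src2 = 0x8
 * it would return 0.
 */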
4193
4194 static enum i40e_status_code
4195 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4196 {
4197         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4198
4199         /* If DCB is not supported, only default TC is supported */
4200         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4201                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4202                 return I40E_NOT_SUPPORTED;
4203         }
4204
4205         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4206                 PMD_DRV_LOG(ERR,
4207                         "Enabled TC map 0x%x not applicable to HW support 0x%x",
4208                         hw->func_caps.enabled_tcmap, enabled_tcmap);
4209                 return I40E_NOT_SUPPORTED;
4210         }
4211         return I40E_SUCCESS;
4212 }
4213
4214 int
4215 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4216                                 struct i40e_vsi_vlan_pvid_info *info)
4217 {
4218         struct i40e_hw *hw;
4219         struct i40e_vsi_context ctxt;
4220         uint8_t vlan_flags = 0;
4221         int ret;
4222
4223         if (vsi == NULL || info == NULL) {
4224                 PMD_DRV_LOG(ERR, "invalid parameters");
4225                 return I40E_ERR_PARAM;
4226         }
4227
4228         if (info->on) {
4229                 vsi->info.pvid = info->config.pvid;
4230                 /**
4231                  * If insert pvid is enabled, only tagged pkts are
4232                  * allowed to be sent out.
4233                  */
4234                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4235                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4236         } else {
4237                 vsi->info.pvid = 0;
4238                 if (info->config.reject.tagged == 0)
4239                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4240
4241                 if (info->config.reject.untagged == 0)
4242                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4243         }
4244         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4245                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
4246         vsi->info.port_vlan_flags |= vlan_flags;
4247         vsi->info.valid_sections =
4248                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4249         memset(&ctxt, 0, sizeof(ctxt));
4250         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4251         ctxt.seid = vsi->seid;
4252
4253         hw = I40E_VSI_TO_HW(vsi);
4254         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4255         if (ret != I40E_SUCCESS)
4256                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4257
4258         return ret;
4259 }
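
/*
 * Illustrative usage sketch (not part of the driver source): enabling PVID
 * insertion on a VSI with the helper above, using hypothetical values:
 *
 *   struct i40e_vsi_vlan_pvid_info info;
 *
 *   memset(&info, 0, sizeof(info));
 *   info.on = 1;
 *   info.config.pvid = 100;   // use VLAN 100 as the port VLAN ID
 *   i40e_vsi_vlan_pvid_set(vsi, &info);
 *
 * With info.on = 0, the config.reject.tagged and config.reject.untagged
 * flags select which packets may still be sent out.
 */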
4260
4261 static int
4262 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4263 {
4264         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4265         int i, ret;
4266         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4267
4268         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4269         if (ret != I40E_SUCCESS)
4270                 return ret;
4271
4272         if (!vsi->seid) {
4273                 PMD_DRV_LOG(ERR, "seid not valid");
4274                 return -EINVAL;
4275         }
4276
4277         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4278         tc_bw_data.tc_valid_bits = enabled_tcmap;
4279         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4280                 tc_bw_data.tc_bw_credits[i] =
4281                         (enabled_tcmap & (1 << i)) ? 1 : 0;
4282
4283         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4284         if (ret != I40E_SUCCESS) {
4285                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4286                 return ret;
4287         }
4288
4289         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4290                                         sizeof(vsi->info.qs_handle));
4291         return I40E_SUCCESS;
4292 }
4293
4294 static enum i40e_status_code
4295 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4296                                  struct i40e_aqc_vsi_properties_data *info,
4297                                  uint8_t enabled_tcmap)
4298 {
4299         enum i40e_status_code ret;
4300         int i, total_tc = 0;
4301         uint16_t qpnum_per_tc, bsf, qp_idx;
4302
4303         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4304         if (ret != I40E_SUCCESS)
4305                 return ret;
4306
4307         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4308                 if (enabled_tcmap & (1 << i))
4309                         total_tc++;
4310         if (total_tc == 0)
4311                 total_tc = 1;
4312         vsi->enabled_tc = enabled_tcmap;
4313
4314         /* Number of queues per enabled TC */
4315         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4316         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4317         bsf = rte_bsf32(qpnum_per_tc);
4318
4319         /* Adjust the queue number to actual queues that can be applied */
4320         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4321                 vsi->nb_qps = qpnum_per_tc * total_tc;
4322
4323         /**
4324          * Configure the TC and queue mapping parameters. For each enabled
4325          * TC, allocate qpnum_per_tc queues to that traffic class. Disabled
4326          * TCs are served by the default queue.
4327          */
4328         qp_idx = 0;
4329         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4330                 if (vsi->enabled_tc & (1 << i)) {
4331                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4332                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4333                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4334                         qp_idx += qpnum_per_tc;
4335                 } else
4336                         info->tc_mapping[i] = 0;
4337         }
4338
4339         /* Associate queue number with VSI */
4340         if (vsi->type == I40E_VSI_SRIOV) {
4341                 info->mapping_flags |=
4342                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4343                 for (i = 0; i < vsi->nb_qps; i++)
4344                         info->queue_mapping[i] =
4345                                 rte_cpu_to_le_16(vsi->base_queue + i);
4346         } else {
4347                 info->mapping_flags |=
4348                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4349                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4350         }
4351         info->valid_sections |=
4352                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4353
4354         return I40E_SUCCESS;
4355 }
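
/*
 * Worked example (illustrative): with vsi->nb_qps = 16 and only TC0 enabled,
 * total_tc = 1, qpnum_per_tc = 16 and bsf = rte_bsf32(16) = 4. TC0's entry
 * then encodes queue offset 0 and 2^4 = 16 queues:
 *
 *   tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (4 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * Disabled TCs keep tc_mapping[i] = 0 and are served by the default queue.
 */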
4356
4357 static int
4358 i40e_veb_release(struct i40e_veb *veb)
4359 {
4360         struct i40e_vsi *vsi;
4361         struct i40e_hw *hw;
4362
4363         if (veb == NULL)
4364                 return -EINVAL;
4365
4366         if (!TAILQ_EMPTY(&veb->head)) {
4367                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4368                 return -EACCES;
4369         }
4370         /* associate_vsi field is NULL for floating VEB */
4371         if (veb->associate_vsi != NULL) {
4372                 vsi = veb->associate_vsi;
4373                 hw = I40E_VSI_TO_HW(vsi);
4374
4375                 vsi->uplink_seid = veb->uplink_seid;
4376                 vsi->veb = NULL;
4377         } else {
4378                 veb->associate_pf->main_vsi->floating_veb = NULL;
4379                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4380         }
4381
4382         i40e_aq_delete_element(hw, veb->seid, NULL);
4383         rte_free(veb);
4384         return I40E_SUCCESS;
4385 }
4386
4387 /* Setup a veb */
4388 static struct i40e_veb *
4389 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4390 {
4391         struct i40e_veb *veb;
4392         int ret;
4393         struct i40e_hw *hw;
4394
4395         if (pf == NULL) {
4396                 PMD_DRV_LOG(ERR,
4397                             "VEB setup failed, associated PF shouldn't be NULL");
4398                 return NULL;
4399         }
4400         hw = I40E_PF_TO_HW(pf);
4401
4402         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4403         if (!veb) {
4404                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4405                 goto fail;
4406         }
4407
4408         veb->associate_vsi = vsi;
4409         veb->associate_pf = pf;
4410         TAILQ_INIT(&veb->head);
4411         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4412
4413         /* create floating veb if vsi is NULL */
4414         if (vsi != NULL) {
4415                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4416                                       I40E_DEFAULT_TCMAP, false,
4417                                       &veb->seid, false, NULL);
4418         } else {
4419                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4420                                       true, &veb->seid, false, NULL);
4421         }
4422
4423         if (ret != I40E_SUCCESS) {
4424                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4425                             hw->aq.asq_last_status);
4426                 goto fail;
4427         }
4428         veb->enabled_tc = I40E_DEFAULT_TCMAP;
4429
4430         /* get statistics index */
4431         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4432                                 &veb->stats_idx, NULL, NULL, NULL);
4433         if (ret != I40E_SUCCESS) {
4434                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
4435                             hw->aq.asq_last_status);
4436                 goto fail;
4437         }
4438         /* Get VEB bandwidth, to be implemented */
4439         /* Now associated vsi binding to the VEB, set uplink to this VEB */
4440         if (vsi)
4441                 vsi->uplink_seid = veb->seid;
4442
4443         return veb;
4444 fail:
4445         rte_free(veb);
4446         return NULL;
4447 }
4448
4449 int
4450 i40e_vsi_release(struct i40e_vsi *vsi)
4451 {
4452         struct i40e_pf *pf;
4453         struct i40e_hw *hw;
4454         struct i40e_vsi_list *vsi_list;
4455         void *temp;
4456         int ret;
4457         struct i40e_mac_filter *f;
4458         uint16_t user_param;
4459
4460         if (!vsi)
4461                 return I40E_SUCCESS;
4462
4463         if (!vsi->adapter)
4464                 return -EFAULT;
4465
4466         user_param = vsi->user_param;
4467
4468         pf = I40E_VSI_TO_PF(vsi);
4469         hw = I40E_VSI_TO_HW(vsi);
4470
4471         /* VSI has child to attach, release child first */
4472         if (vsi->veb) {
4473                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4474                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4475                                 return -1;
4476                 }
4477                 i40e_veb_release(vsi->veb);
4478         }
4479
4480         if (vsi->floating_veb) {
4481                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4482                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4483                                 return -1;
4484                 }
4485         }
4486
4487         /* Remove all macvlan filters of the VSI */
4488         i40e_vsi_remove_all_macvlan_filter(vsi);
4489         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4490                 rte_free(f);
4491
4492         if (vsi->type != I40E_VSI_MAIN &&
4493             ((vsi->type != I40E_VSI_SRIOV) ||
4494             !pf->floating_veb_list[user_param])) {
4495                 /* Remove vsi from parent's sibling list */
4496                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4497                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4498                         return I40E_ERR_PARAM;
4499                 }
4500                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4501                                 &vsi->sib_vsi_list, list);
4502
4503                 /* Remove all switch element of the VSI */
4504                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4505                 if (ret != I40E_SUCCESS)
4506                         PMD_DRV_LOG(ERR, "Failed to delete element");
4507         }
4508
4509         if ((vsi->type == I40E_VSI_SRIOV) &&
4510             pf->floating_veb_list[user_param]) {
4511                 /* Remove vsi from parent's sibling list */
4512                 if (vsi->parent_vsi == NULL ||
4513                     vsi->parent_vsi->floating_veb == NULL) {
4514                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4515                         return I40E_ERR_PARAM;
4516                 }
4517                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4518                              &vsi->sib_vsi_list, list);
4519
4520                 /* Remove all switch element of the VSI */
4521                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4522                 if (ret != I40E_SUCCESS)
4523                         PMD_DRV_LOG(ERR, "Failed to delete element");
4524         }
4525
4526         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4527
4528         if (vsi->type != I40E_VSI_SRIOV)
4529                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4530         rte_free(vsi);
4531
4532         return I40E_SUCCESS;
4533 }
4534
4535 static int
4536 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4537 {
4538         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4539         struct i40e_aqc_remove_macvlan_element_data def_filter;
4540         struct i40e_mac_filter_info filter;
4541         int ret;
4542
4543         if (vsi->type != I40E_VSI_MAIN)
4544                 return I40E_ERR_CONFIG;
4545         memset(&def_filter, 0, sizeof(def_filter));
4546         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4547                                         ETH_ADDR_LEN);
4548         def_filter.vlan_tag = 0;
4549         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4550                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4551         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4552         if (ret != I40E_SUCCESS) {
4553                 struct i40e_mac_filter *f;
4554                 struct ether_addr *mac;
4555
4556                 PMD_DRV_LOG(DEBUG,
4557                             "Cannot remove the default macvlan filter");
4558                 /* The permanent MAC needs to be added to the MAC list */
4559                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4560                 if (f == NULL) {
4561                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4562                         return I40E_ERR_NO_MEMORY;
4563                 }
4564                 mac = &f->mac_info.mac_addr;
4565                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4566                                 ETH_ADDR_LEN);
4567                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4568                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4569                 vsi->mac_num++;
4570
4571                 return ret;
4572         }
4573         (void)rte_memcpy(&filter.mac_addr,
4574                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4575         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4576         return i40e_vsi_add_mac(vsi, &filter);
4577 }
4578
4579 /*
4580  * i40e_vsi_get_bw_config - Query VSI BW Information
4581  * @vsi: the VSI to be queried
4582  *
4583  * Returns 0 on success, negative value on failure
4584  */
4585 static enum i40e_status_code
4586 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4587 {
4588         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4589         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4590         struct i40e_hw *hw = &vsi->adapter->hw;
4591         i40e_status ret;
4592         int i;
4593         uint32_t bw_max;
4594
4595         memset(&bw_config, 0, sizeof(bw_config));
4596         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4597         if (ret != I40E_SUCCESS) {
4598                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4599                             hw->aq.asq_last_status);
4600                 return ret;
4601         }
4602
4603         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4604         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4605                                         &ets_sla_config, NULL);
4606         if (ret != I40E_SUCCESS) {
4607                 PMD_DRV_LOG(ERR,
4608                         "VSI failed to get TC bandwidth configuration %u",
4609                         hw->aq.asq_last_status);
4610                 return ret;
4611         }
4612
4613         /* store and print out BW info */
4614         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4615         vsi->bw_info.bw_max = bw_config.max_bw;
4616         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4617         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
4618         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4619                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4620                      I40E_16_BIT_WIDTH);
4621         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4622                 vsi->bw_info.bw_ets_share_credits[i] =
4623                                 ets_sla_config.share_credits[i];
4624                 vsi->bw_info.bw_ets_credits[i] =
4625                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
4626                 /* 4 bits per TC, 4th bit is reserved */
4627                 vsi->bw_info.bw_ets_max[i] =
4628                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4629                                   RTE_LEN2MASK(3, uint8_t));
4630                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4631                             vsi->bw_info.bw_ets_share_credits[i]);
4632                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4633                             vsi->bw_info.bw_ets_credits[i]);
4634                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4635                             vsi->bw_info.bw_ets_max[i]);
4636         }
4637
4638         return I40E_SUCCESS;
4639 }
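
/*
 * Worked example (illustrative): tc_bw_max[0] and tc_bw_max[1] are combined
 * into one 32-bit value holding a 4-bit field per TC (the 4th bit of each
 * field is reserved, hence the 3-bit mask). If bw_max were 0x00000321, the
 * unpacking above would give bw_ets_max[0] = 1, bw_ets_max[1] = 2 and
 * bw_ets_max[2] = 3:
 *
 *   bw_ets_max[i] = (bw_max >> (i * 4)) & 0x7;
 */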
4640
4641 /* i40e_enable_pf_lb
4642  * @pf: pointer to the pf structure
4643  *
4644  * allow loopback on pf
4645  */
4646 static inline void
4647 i40e_enable_pf_lb(struct i40e_pf *pf)
4648 {
4649         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4650         struct i40e_vsi_context ctxt;
4651         int ret;
4652
4653         /* Use the FW API if FW >= v5.0 */
4654         if (hw->aq.fw_maj_ver < 5) {
4655                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4656                 return;
4657         }
4658
4659         memset(&ctxt, 0, sizeof(ctxt));
4660         ctxt.seid = pf->main_vsi_seid;
4661         ctxt.pf_num = hw->pf_id;
4662         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4663         if (ret) {
4664                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4665                             ret, hw->aq.asq_last_status);
4666                 return;
4667         }
4668         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4669         ctxt.info.valid_sections =
4670                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4671         ctxt.info.switch_id |=
4672                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4673
4674         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4675         if (ret)
4676                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
4677                             hw->aq.asq_last_status);
4678 }
4679
4680 /* Setup a VSI */
4681 struct i40e_vsi *
4682 i40e_vsi_setup(struct i40e_pf *pf,
4683                enum i40e_vsi_type type,
4684                struct i40e_vsi *uplink_vsi,
4685                uint16_t user_param)
4686 {
4687         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4688         struct i40e_vsi *vsi;
4689         struct i40e_mac_filter_info filter;
4690         int ret;
4691         struct i40e_vsi_context ctxt;
4692         struct ether_addr broadcast =
4693                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4694
4695         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4696             uplink_vsi == NULL) {
4697                 PMD_DRV_LOG(ERR,
4698                         "VSI setup failed, uplink VSI shouldn't be NULL");
4699                 return NULL;
4700         }
4701
4702         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4703                 PMD_DRV_LOG(ERR,
4704                         "VSI setup failed, MAIN VSI uplink VSI should be NULL");
4705                 return NULL;
4706         }
4707
4708         /* Two situations:
4709          * 1. type is not MAIN and uplink VSI is not NULL:
4710          *    if the uplink VSI has no VEB yet, create one in its veb field.
4711          * 2. type is SRIOV and the uplink VSI is NULL:
4712          *    if the floating VEB is NULL, create one in the floating_veb field.
4713          */
4714
4715         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
4716             uplink_vsi->veb == NULL) {
4717                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
4718
4719                 if (uplink_vsi->veb == NULL) {
4720                         PMD_DRV_LOG(ERR, "VEB setup failed");
4721                         return NULL;
4722                 }
4723                 /* set ALLOWLOOPBACK on the PF when the VEB is created */
4724                 i40e_enable_pf_lb(pf);
4725         }
4726
4727         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
4728             pf->main_vsi->floating_veb == NULL) {
4729                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
4730
4731                 if (pf->main_vsi->floating_veb == NULL) {
4732                         PMD_DRV_LOG(ERR, "VEB setup failed");
4733                         return NULL;
4734                 }
4735         }
4736
4737         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
4738         if (!vsi) {
4739                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
4740                 return NULL;
4741         }
4742         TAILQ_INIT(&vsi->mac_list);
4743         vsi->type = type;
4744         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
4745         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
4746         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
4747         vsi->user_param = user_param;
4748         vsi->vlan_anti_spoof_on = 0;
4749         vsi->vlan_filter_on = 0;
4750         /* Allocate queues */
4751         switch (vsi->type) {
4752         case I40E_VSI_MAIN  :
4753                 vsi->nb_qps = pf->lan_nb_qps;
4754                 break;
4755         case I40E_VSI_SRIOV :
4756                 vsi->nb_qps = pf->vf_nb_qps;
4757                 break;
4758         case I40E_VSI_VMDQ2:
4759                 vsi->nb_qps = pf->vmdq_nb_qps;
4760                 break;
4761         case I40E_VSI_FDIR:
4762                 vsi->nb_qps = pf->fdir_nb_qps;
4763                 break;
4764         default:
4765                 goto fail_mem;
4766         }
4767         /*
4768          * The filter status descriptor is reported on RX queue 0,
4769          * while the TX queue for FDIR filter programming has no
4770          * such constraint and can be any queue.
4771          * To simplify things, the FDIR VSI uses queue pair 0.
4772          * To make sure it gets queue pair 0, queue allocation
4773          * needs to be done before this function is called.
4774          */
4775         if (type != I40E_VSI_FDIR) {
4776                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
4777                 if (ret < 0) {
4778                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
4779                                         vsi->seid, ret);
4780                         goto fail_mem;
4781                 }
4782                 vsi->base_queue = ret;
4783         } else
4784                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
4785
4786         /* VF has MSIX interrupt in VF range, don't allocate here */
4787         if (type == I40E_VSI_MAIN) {
4788                 ret = i40e_res_pool_alloc(&pf->msix_pool,
4789                                           RTE_MIN(vsi->nb_qps,
4790                                                   RTE_MAX_RXTX_INTR_VEC_ID));
4791                 if (ret < 0) {
4792                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
4793                                     vsi->seid, ret);
4794                         goto fail_queue_alloc;
4795                 }
4796                 vsi->msix_intr = ret;
4797                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
4798         } else if (type != I40E_VSI_SRIOV) {
4799                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
4800                 if (ret < 0) {
4801                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
4802                         goto fail_queue_alloc;
4803                 }
4804                 vsi->msix_intr = ret;
4805                 vsi->nb_msix = 1;
4806         } else {
4807                 vsi->msix_intr = 0;
4808                 vsi->nb_msix = 0;
4809         }
4810
4811         /* Add VSI */
4812         if (type == I40E_VSI_MAIN) {
4813                 /* For main VSI, no need to add since it's default one */
4814                 vsi->uplink_seid = pf->mac_seid;
4815                 vsi->seid = pf->main_vsi_seid;
4816                 /* Bind queues with specific MSIX interrupt */
4817                 /**
4818                  * At least 2 interrupts are needed: one for the misc cause,
4819                  * which is enabled from the OS side, and another for binding
4820                  * the queue interrupts from the device side only.
4821                  */
4822
4823                 /* Get default VSI parameters from hardware */
4824                 memset(&ctxt, 0, sizeof(ctxt));
4825                 ctxt.seid = vsi->seid;
4826                 ctxt.pf_num = hw->pf_id;
4827                 ctxt.uplink_seid = vsi->uplink_seid;
4828                 ctxt.vf_num = 0;
4829                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4830                 if (ret != I40E_SUCCESS) {
4831                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
4832                         goto fail_msix_alloc;
4833                 }
4834                 (void)rte_memcpy(&vsi->info, &ctxt.info,
4835                         sizeof(struct i40e_aqc_vsi_properties_data));
4836                 vsi->vsi_id = ctxt.vsi_number;
4837                 vsi->info.valid_sections = 0;
4838
4839                 /* Configure tc, enabled TC0 only */
4840                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
4841                         I40E_SUCCESS) {
4842                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
4843                         goto fail_msix_alloc;
4844                 }
4845
4846                 /* TC, queue mapping */
4847                 memset(&ctxt, 0, sizeof(ctxt));
4848                 vsi->info.valid_sections |=
4849                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4850                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
4851                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4852                 (void)rte_memcpy(&ctxt.info, &vsi->info,
4853                         sizeof(struct i40e_aqc_vsi_properties_data));
4854                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4855                                                 I40E_DEFAULT_TCMAP);
4856                 if (ret != I40E_SUCCESS) {
4857                         PMD_DRV_LOG(ERR,
4858                                 "Failed to configure TC queue mapping");
4859                         goto fail_msix_alloc;
4860                 }
4861                 ctxt.seid = vsi->seid;
4862                 ctxt.pf_num = hw->pf_id;
4863                 ctxt.uplink_seid = vsi->uplink_seid;
4864                 ctxt.vf_num = 0;
4865
4866                 /* Update VSI parameters */
4867                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4868                 if (ret != I40E_SUCCESS) {
4869                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
4870                         goto fail_msix_alloc;
4871                 }
4872
4873                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
4874                                                 sizeof(vsi->info.tc_mapping));
4875                 (void)rte_memcpy(&vsi->info.queue_mapping,
4876                                 &ctxt.info.queue_mapping,
4877                         sizeof(vsi->info.queue_mapping));
4878                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
4879                 vsi->info.valid_sections = 0;
4880
4881                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
4882                                 ETH_ADDR_LEN);
4883
4884                 /**
4885                  * Updating the default filter settings is necessary to prevent
4886                  * reception of tagged packets.
4887                  * Some old firmware configurations load a default macvlan
4888                  * filter which accepts both tagged and untagged packets.
4889                  * The update replaces it with a normal filter if needed.
4890                  * For NVM 4.2.2 or later, the update is not needed anymore:
4891                  * firmware with correct configurations loads the default
4892                  * macvlan filter, which is expected and cannot be removed.
4893                  */
4894                 i40e_update_default_filter_setting(vsi);
4895                 i40e_config_qinq(hw, vsi);
4896         } else if (type == I40E_VSI_SRIOV) {
4897                 memset(&ctxt, 0, sizeof(ctxt));
4898                 /**
4899                  * For other VSIs, the uplink_seid equals the uplink VSI's
4900                  * uplink_seid since they share the same VEB.
4901                  */
4902                 if (uplink_vsi == NULL)
4903                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
4904                 else
4905                         vsi->uplink_seid = uplink_vsi->uplink_seid;
4906                 ctxt.pf_num = hw->pf_id;
4907                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4908                 ctxt.uplink_seid = vsi->uplink_seid;
4909                 ctxt.connection_type = 0x1;
4910                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4911
4912                 /* Use the VEB configuration if FW >= v5.0 */
4913                 if (hw->aq.fw_maj_ver >= 5) {
4914                         /* Configure switch ID */
4915                         ctxt.info.valid_sections |=
4916                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4917                         ctxt.info.switch_id =
4918                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4919                 }
4920
4921                 /* Configure port/vlan */
4922                 ctxt.info.valid_sections |=
4923                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4924                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4925                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4926                                                 hw->func_caps.enabled_tcmap);
4927                 if (ret != I40E_SUCCESS) {
4928                         PMD_DRV_LOG(ERR,
4929                                 "Failed to configure TC queue mapping");
4930                         goto fail_msix_alloc;
4931                 }
4932
4933                 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
4934                 ctxt.info.valid_sections |=
4935                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4936                 /**
4937                  * Since the VSI is not created yet, only configure the
4938                  * parameters; the VSI itself is added below.
4939                  */
4940
4941                 i40e_config_qinq(hw, vsi);
4942         } else if (type == I40E_VSI_VMDQ2) {
4943                 memset(&ctxt, 0, sizeof(ctxt));
4944                 /*
4945                  * For other VSIs, the uplink_seid equals the uplink VSI's
4946                  * uplink_seid since they share the same VEB.
4947                  */
4948                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4949                 ctxt.pf_num = hw->pf_id;
4950                 ctxt.vf_num = 0;
4951                 ctxt.uplink_seid = vsi->uplink_seid;
4952                 ctxt.connection_type = 0x1;
4953                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4954
4955                 ctxt.info.valid_sections |=
4956                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4957                 /* user_param carries the flag to enable loopback */
4958                 if (user_param) {
4959                         ctxt.info.switch_id =
4960                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
4961                         ctxt.info.switch_id |=
4962                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4963                 }
4964
4965                 /* Configure port/vlan */
4966                 ctxt.info.valid_sections |=
4967                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4968                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4969                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4970                                                 I40E_DEFAULT_TCMAP);
4971                 if (ret != I40E_SUCCESS) {
4972                         PMD_DRV_LOG(ERR,
4973                                 "Failed to configure TC queue mapping");
4974                         goto fail_msix_alloc;
4975                 }
4976                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4977                 ctxt.info.valid_sections |=
4978                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4979         } else if (type == I40E_VSI_FDIR) {
4980                 memset(&ctxt, 0, sizeof(ctxt));
4981                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4982                 ctxt.pf_num = hw->pf_id;
4983                 ctxt.vf_num = 0;
4984                 ctxt.uplink_seid = vsi->uplink_seid;
4985                 ctxt.connection_type = 0x1;     /* regular data port */
4986                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4987                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4988                                                 I40E_DEFAULT_TCMAP);
4989                 if (ret != I40E_SUCCESS) {
4990                         PMD_DRV_LOG(ERR,
4991                                 "Failed to configure TC queue mapping.");
4992                         goto fail_msix_alloc;
4993                 }
4994                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4995                 ctxt.info.valid_sections |=
4996                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4997         } else {
4998                 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
4999                 goto fail_msix_alloc;
5000         }
5001
5002         if (vsi->type != I40E_VSI_MAIN) {
5003                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5004                 if (ret != I40E_SUCCESS) {
5005                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5006                                     hw->aq.asq_last_status);
5007                         goto fail_msix_alloc;
5008                 }
5009                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5010                 vsi->info.valid_sections = 0;
5011                 vsi->seid = ctxt.seid;
5012                 vsi->vsi_id = ctxt.vsi_number;
5013                 vsi->sib_vsi_list.vsi = vsi;
5014                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5015                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5016                                           &vsi->sib_vsi_list, list);
5017                 } else {
5018                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5019                                           &vsi->sib_vsi_list, list);
5020                 }
5021         }
5022
5023         /* MAC/VLAN configuration */
5024         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5025         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5026
5027         ret = i40e_vsi_add_mac(vsi, &filter);
5028         if (ret != I40E_SUCCESS) {
5029                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5030                 goto fail_msix_alloc;
5031         }
5032
5033         /* Get VSI BW information */
5034         i40e_vsi_get_bw_config(vsi);
5035         return vsi;
5036 fail_msix_alloc:
5037         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5038 fail_queue_alloc:
5039         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5040 fail_mem:
5041         rte_free(vsi);
5042         return NULL;
5043 }
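
/*
 * Illustrative note (not part of the driver source): i40e_vsi_setup() is the
 * entry point used in this file to create the main, SRIOV, VMDq and FDIR
 * VSIs. A hypothetical VMDq VSI created on top of the main VSI would look
 * like:
 *
 *   struct i40e_vsi *vmdq_vsi;
 *
 *   vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 0);
 *   if (vmdq_vsi == NULL)
 *           PMD_DRV_LOG(ERR, "Failed to set up VMDq VSI");
 *
 * The matching teardown is i40e_vsi_release(), which also releases any VEB
 * and child VSIs attached to the VSI being freed.
 */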
5044
5045 /* Configure vlan filter on or off */
5046 int
5047 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5048 {
5049         int i, num;
5050         struct i40e_mac_filter *f;
5051         void *temp;
5052         struct i40e_mac_filter_info *mac_filter;
5053         enum rte_mac_filter_type desired_filter;
5054         int ret = I40E_SUCCESS;
5055
5056         if (on) {
5057                 /* Filter to match MAC and VLAN */
5058                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5059         } else {
5060                 /* Filter to match only MAC */
5061                 desired_filter = RTE_MAC_PERFECT_MATCH;
5062         }
5063
5064         num = vsi->mac_num;
5065
5066         mac_filter = rte_zmalloc("mac_filter_info_data",
5067                                  num * sizeof(*mac_filter), 0);
5068         if (mac_filter == NULL) {
5069                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5070                 return I40E_ERR_NO_MEMORY;
5071         }
5072
5073         i = 0;
5074
5075         /* Remove all existing MAC filters */
5076         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5077                 mac_filter[i] = f->mac_info;
5078                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5079                 if (ret) {
5080                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5081                                     on ? "enable" : "disable");
5082                         goto DONE;
5083                 }
5084                 i++;
5085         }
5086
5087         /* Override with new filter */
5088         for (i = 0; i < num; i++) {
5089                 mac_filter[i].filter_type = desired_filter;
5090                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5091                 if (ret) {
5092                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5093                                     on ? "enable" : "disable");
5094                         goto DONE;
5095                 }
5096         }
5097
5098 DONE:
5099         rte_free(mac_filter);
5100         return ret;
5101 }
5102
5103 /* Configure vlan stripping on or off */
5104 int
5105 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5106 {
5107         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5108         struct i40e_vsi_context ctxt;
5109         uint8_t vlan_flags;
5110         int ret = I40E_SUCCESS;
5111
5112         /* Check if it is already on or off */
5113         if (vsi->info.valid_sections &
5114                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5115                 if (on) {
5116                         if ((vsi->info.port_vlan_flags &
5117                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5118                                 return 0; /* already on */
5119                 } else {
5120                         if ((vsi->info.port_vlan_flags &
5121                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5122                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5123                                 return 0; /* already off */
5124                 }
5125         }
5126
5127         if (on)
5128                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5129         else
5130                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5131         vsi->info.valid_sections =
5132                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5133         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5134         vsi->info.port_vlan_flags |= vlan_flags;
5135         ctxt.seid = vsi->seid;
5136         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5137         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5138         if (ret)
5139                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5140                             on ? "enable" : "disable");
5141
5142         return ret;
5143 }
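/*
 * Editor's note (illustrative, not part of the driver): the EMOD bits in
 * port_vlan_flags select how the hardware handles the outer VLAN tag on
 * receive.  Per the admin queue VSI context definitions this file relies on,
 * I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH (value 0) is understood to request that
 * the tag be stripped and reported in the RX descriptor, while
 * I40E_AQ_VSI_PVLAN_EMOD_NOTHING leaves the tag in the packet.  That is why
 * an EMOD field of zero above is treated as "stripping already enabled".
 */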
5144
5145 static int
5146 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5147 {
5148         struct rte_eth_dev_data *data = dev->data;
5149         int ret;
5150         int mask = 0;
5151
5152         /* Apply vlan offload setting */
5153         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
5154         i40e_vlan_offload_set(dev, mask);
5155
5156         /* Apply double-vlan setting, not implemented yet */
5157
5158         /* Apply pvid setting */
5159         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5160                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
5161         if (ret)
5162                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5163
5164         return ret;
5165 }
5166
5167 static int
5168 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5169 {
5170         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5171
5172         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5173 }
5174
5175 static int
5176 i40e_update_flow_control(struct i40e_hw *hw)
5177 {
5178 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5179         struct i40e_link_status link_status;
5180         uint32_t rxfc = 0, txfc = 0, reg;
5181         uint8_t an_info;
5182         int ret;
5183
5184         memset(&link_status, 0, sizeof(link_status));
5185         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5186         if (ret != I40E_SUCCESS) {
5187                 PMD_DRV_LOG(ERR, "Failed to get link status information");
5188                 goto write_reg; /* Disable flow control */
5189         }
5190
5191         an_info = hw->phy.link_info.an_info;
5192         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5193                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5194                 ret = I40E_ERR_NOT_READY;
5195                 goto write_reg; /* Disable flow control */
5196         }
5197         /**
5198          * If link auto negotiation is enabled, flow control needs to
5199          * be configured according to it
5200          */
5201         switch (an_info & I40E_LINK_PAUSE_RXTX) {
5202         case I40E_LINK_PAUSE_RXTX:
5203                 rxfc = 1;
5204                 txfc = 1;
5205                 hw->fc.current_mode = I40E_FC_FULL;
5206                 break;
5207         case I40E_AQ_LINK_PAUSE_RX:
5208                 rxfc = 1;
5209                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5210                 break;
5211         case I40E_AQ_LINK_PAUSE_TX:
5212                 txfc = 1;
5213                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5214                 break;
5215         default:
5216                 hw->fc.current_mode = I40E_FC_NONE;
5217                 break;
5218         }
5219
5220 write_reg:
5221         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5222                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5223         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5224         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5225         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5226         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5227
5228         return ret;
5229 }
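/*
 * Editor's note (summary of the switch above, not part of the driver): the
 * negotiated pause bits in an_info map to the flow control mode as follows.
 *
 *   an_info pause bits set            hw->fc.current_mode   rxfc  txfc
 *   -------------------------------   -------------------   ----  ----
 *   PAUSE_RX and PAUSE_TX             I40E_FC_FULL            1     1
 *   only I40E_AQ_LINK_PAUSE_RX        I40E_FC_RX_PAUSE        1     0
 *   only I40E_AQ_LINK_PAUSE_TX        I40E_FC_TX_PAUSE        0     1
 *   neither                           I40E_FC_NONE            0     0
 *
 * txfc is then written to PRTDCB_FCCFG.TFCE and rxfc to PRTDCB_MFLCN.RFCE;
 * on any earlier failure both stay 0, which disables flow control.
 */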
5230
5231 /* PF setup */
5232 static int
5233 i40e_pf_setup(struct i40e_pf *pf)
5234 {
5235         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5236         struct i40e_filter_control_settings settings;
5237         struct i40e_vsi *vsi;
5238         int ret;
5239
5240         /* Clear all stats counters */
5241         pf->offset_loaded = FALSE;
5242         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5243         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5244         pf->internal_rx_bytes = 0;
5245         pf->internal_tx_bytes = 0;
5246         pf->internal_rx_bytes_offset = 0;
5247         pf->internal_tx_bytes_offset = 0;
5248
5249         ret = i40e_pf_get_switch_config(pf);
5250         if (ret != I40E_SUCCESS) {
5251                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5252                 return ret;
5253         }
5254         if (pf->flags & I40E_FLAG_FDIR) {
5255                 /* Allocate the queue first so that FDIR can use queue pair 0 */
5256                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5257                 if (ret != I40E_FDIR_QUEUE_ID) {
5258                         PMD_DRV_LOG(ERR,
5259                                 "queue allocation fails for FDIR: ret =%d",
5260                                 ret);
5261                         pf->flags &= ~I40E_FLAG_FDIR;
5262                 }
5263         }
5264         /* main VSI setup */
5265         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5266         if (!vsi) {
5267                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5268                 return I40E_ERR_NOT_READY;
5269         }
5270         pf->main_vsi = vsi;
5271
5272         /* Configure filter control */
5273         memset(&settings, 0, sizeof(settings));
5274         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5275                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5276         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5277                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5278         else {
5279                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5280                         hw->func_caps.rss_table_size);
5281                 return I40E_ERR_PARAM;
5282         }
5283         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5284                 hw->func_caps.rss_table_size);
5285         pf->hash_lut_size = hw->func_caps.rss_table_size;
5286
5287         /* Enable ethtype and macvlan filters */
5288         settings.enable_ethtype = TRUE;
5289         settings.enable_macvlan = TRUE;
5290         ret = i40e_set_filter_control(hw, &settings);
5291         if (ret)
5292                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5293                                                                 ret);
5294
5295         /* Update flow control according to the auto negotiation */
5296         i40e_update_flow_control(hw);
5297
5298         return I40E_SUCCESS;
5299 }
5300
5301 int
5302 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5303 {
5304         uint32_t reg;
5305         uint16_t j;
5306
5307         /**
5308          * Set or clear the TX Queue Disable flags,
5309          * as required by the hardware.
5310          */
5311         i40e_pre_tx_queue_cfg(hw, q_idx, on);
5312         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5313
5314         /* Wait until the request is finished */
5315         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5316                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5317                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5318                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5319                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5320                                                         & 0x1))) {
5321                         break;
5322                 }
5323         }
5324         if (on) {
5325                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5326                         return I40E_SUCCESS; /* already on, skip next steps */
5327
5328                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5329                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5330         } else {
5331                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5332                         return I40E_SUCCESS; /* already off, skip next steps */
5333                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5334         }
5335         /* Write the register */
5336         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5337         /* Check the result */
5338         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5339                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5340                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5341                 if (on) {
5342                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5343                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5344                                 break;
5345                 } else {
5346                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5347                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5348                                 break;
5349                 }
5350         }
5351         /* Check whether it timed out */
5352         if (j >= I40E_CHK_Q_ENA_COUNT) {
5353                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5354                             (on ? "enable" : "disable"), q_idx);
5355                 return I40E_ERR_TIMEOUT;
5356         }
5357
5358         return I40E_SUCCESS;
5359 }
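/*
 * Minimal usage sketch (editor's note; hypothetical caller and tx_queue_id,
 * not part of this file):
 *
 *     int ret = i40e_switch_tx_queue(hw, tx_queue_id, TRUE);
 *     if (ret != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "enabling tx queue %u failed", tx_queue_id);
 *
 * The function pre-configures the queue, polls QTX_ENA until the QENA_REQ
 * and QENA_STAT bits agree (no request in flight), toggles QENA_REQ, and
 * polls again until QENA_STAT reflects the new state or the
 * I40E_CHK_Q_ENA_COUNT / I40E_CHK_Q_ENA_INTERVAL_US budget is exhausted.
 */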
5360
5361 /* Switch on or off the tx queues */
5362 static int
5363 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5364 {
5365         struct rte_eth_dev_data *dev_data = pf->dev_data;
5366         struct i40e_tx_queue *txq;
5367         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5368         uint16_t i;
5369         int ret;
5370
5371         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5372                 txq = dev_data->tx_queues[i];
5373                 /* Skip the queue if it is not configured, or, when starting,
5374                  * if it is marked for deferred (per-queue) start */
5375                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5376                         continue;
5377                 if (on)
5378                         ret = i40e_dev_tx_queue_start(dev, i);
5379                 else
5380                         ret = i40e_dev_tx_queue_stop(dev, i);
5381                 if (ret != I40E_SUCCESS)
5382                         return ret;
5383         }
5384
5385         return I40E_SUCCESS;
5386 }
5387
5388 int
5389 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5390 {
5391         uint32_t reg;
5392         uint16_t j;
5393
5394         /* Wait until the request is finished */
5395         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5396                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5397                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5398                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5399                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5400                         break;
5401         }
5402
5403         if (on) {
5404                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5405                         return I40E_SUCCESS; /* Already on, skip next steps */
5406                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5407         } else {
5408                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5409                         return I40E_SUCCESS; /* Already off, skip next steps */
5410                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5411         }
5412
5413         /* Write the register */
5414         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5415         /* Check the result */
5416         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5417                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5418                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5419                 if (on) {
5420                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5421                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5422                                 break;
5423                 } else {
5424                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5425                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5426                                 break;
5427                 }
5428         }
5429
5430         /* Check whether it timed out */
5431         if (j >= I40E_CHK_Q_ENA_COUNT) {
5432                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5433                             (on ? "enable" : "disable"), q_idx);
5434                 return I40E_ERR_TIMEOUT;
5435         }
5436
5437         return I40E_SUCCESS;
5438 }
5439 /* Switch on or off the rx queues */
5440 static int
5441 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5442 {
5443         struct rte_eth_dev_data *dev_data = pf->dev_data;
5444         struct i40e_rx_queue *rxq;
5445         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5446         uint16_t i;
5447         int ret;
5448
5449         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5450                 rxq = dev_data->rx_queues[i];
5451                 /* Skip the queue if it is not configured, or, when starting,
5452                  * if it is marked for deferred (per-queue) start */
5453                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5454                         continue;
5455                 if (on)
5456                         ret = i40e_dev_rx_queue_start(dev, i);
5457                 else
5458                         ret = i40e_dev_rx_queue_stop(dev, i);
5459                 if (ret != I40E_SUCCESS)
5460                         return ret;
5461         }
5462
5463         return I40E_SUCCESS;
5464 }
5465
5466 /* Switch on or off all the rx/tx queues */
5467 int
5468 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5469 {
5470         int ret;
5471
5472         if (on) {
5473                 /* enable rx queues before enabling tx queues */
5474                 ret = i40e_dev_switch_rx_queues(pf, on);
5475                 if (ret) {
5476                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5477                         return ret;
5478                 }
5479                 ret = i40e_dev_switch_tx_queues(pf, on);
5480         } else {
5481                 /* Stop tx queues before stopping rx queues */
5482                 ret = i40e_dev_switch_tx_queues(pf, on);
5483                 if (ret) {
5484                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5485                         return ret;
5486                 }
5487                 ret = i40e_dev_switch_rx_queues(pf, on);
5488         }
5489
5490         return ret;
5491 }
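/*
 * Usage sketch (editor's note; hypothetical caller, not part of this file).
 * RX queues are switched on before TX queues and switched off after them,
 * mirroring the comments above; presumably so the receive side is ready
 * before transmission starts and is quiesced last on stop.
 *
 *     if (i40e_dev_switch_queues(pf, TRUE) != I40E_SUCCESS)  // start path
 *             return -EIO;
 *     ...
 *     i40e_dev_switch_queues(pf, FALSE);                     // stop path
 */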
5492
5493 /* Initialize VSI for TX */
5494 static int
5495 i40e_dev_tx_init(struct i40e_pf *pf)
5496 {
5497         struct rte_eth_dev_data *data = pf->dev_data;
5498         uint16_t i;
5499         int ret = I40E_SUCCESS;
5500         struct i40e_tx_queue *txq;
5501
5502         for (i = 0; i < data->nb_tx_queues; i++) {
5503                 txq = data->tx_queues[i];
5504                 if (!txq || !txq->q_set)
5505                         continue;
5506                 ret = i40e_tx_queue_init(txq);
5507                 if (ret != I40E_SUCCESS)
5508                         break;
5509         }
5510         if (ret == I40E_SUCCESS)
5511                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5512                                      ->eth_dev);
5513
5514         return ret;
5515 }
5516
5517 /* Initialize VSI for RX */
5518 static int
5519 i40e_dev_rx_init(struct i40e_pf *pf)
5520 {
5521         struct rte_eth_dev_data *data = pf->dev_data;
5522         int ret = I40E_SUCCESS;
5523         uint16_t i;
5524         struct i40e_rx_queue *rxq;
5525
5526         i40e_pf_config_mq_rx(pf);
5527         for (i = 0; i < data->nb_rx_queues; i++) {
5528                 rxq = data->rx_queues[i];
5529                 if (!rxq || !rxq->q_set)
5530                         continue;
5531
5532                 ret = i40e_rx_queue_init(rxq);
5533                 if (ret != I40E_SUCCESS) {
5534                         PMD_DRV_LOG(ERR,
5535                                 "Failed to do RX queue initialization");
5536                         break;
5537                 }
5538         }
5539         if (ret == I40E_SUCCESS)
5540                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5541                                      ->eth_dev);
5542
5543         return ret;
5544 }
5545
5546 static int
5547 i40e_dev_rxtx_init(struct i40e_pf *pf)
5548 {
5549         int err;
5550
5551         err = i40e_dev_tx_init(pf);
5552         if (err) {
5553                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5554                 return err;
5555         }
5556         err = i40e_dev_rx_init(pf);
5557         if (err) {
5558                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5559                 return err;
5560         }
5561
5562         return err;
5563 }
5564
5565 static int
5566 i40e_vmdq_setup(struct rte_eth_dev *dev)
5567 {
5568         struct rte_eth_conf *conf = &dev->data->dev_conf;
5569         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5570         int i, err, conf_vsis, j, loop;
5571         struct i40e_vsi *vsi;
5572         struct i40e_vmdq_info *vmdq_info;
5573         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5574         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5575
5576         /*
5577          * Disable interrupts to avoid messages from VFs. This also avoids
5578          * race conditions during VSI creation/destruction.
5579          */
5580         i40e_pf_disable_irq0(hw);
5581
5582         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5583                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5584                 return -ENOTSUP;
5585         }
5586
5587         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5588         if (conf_vsis > pf->max_nb_vmdq_vsi) {
5589                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
5590                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5591                         pf->max_nb_vmdq_vsi);
5592                 return -ENOTSUP;
5593         }
5594
5595         if (pf->vmdq != NULL) {
5596                 PMD_INIT_LOG(INFO, "VMDQ already configured");
5597                 return 0;
5598         }
5599
5600         pf->vmdq = rte_zmalloc("vmdq_info_struct",
5601                                 sizeof(*vmdq_info) * conf_vsis, 0);
5602
5603         if (pf->vmdq == NULL) {
5604                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5605                 return -ENOMEM;
5606         }
5607
5608         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5609
5610         /* Create VMDQ VSI */
5611         for (i = 0; i < conf_vsis; i++) {
5612                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5613                                 vmdq_conf->enable_loop_back);
5614                 if (vsi == NULL) {
5615                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5616                         err = -1;
5617                         goto err_vsi_setup;
5618                 }
5619                 vmdq_info = &pf->vmdq[i];
5620                 vmdq_info->pf = pf;
5621                 vmdq_info->vsi = vsi;
5622         }
5623         pf->nb_cfg_vmdq_vsi = conf_vsis;
5624
5625         /* Configure Vlan */
5626         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
5627         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5628                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5629                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5630                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5631                                         vmdq_conf->pool_map[i].vlan_id, j);
5632
5633                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5634                                                 vmdq_conf->pool_map[i].vlan_id);
5635                                 if (err) {
5636                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
5637                                         err = -1;
5638                                         goto err_vsi_setup;
5639                                 }
5640                         }
5641                 }
5642         }
5643
5644         i40e_pf_enable_irq0(hw);
5645
5646         return 0;
5647
5648 err_vsi_setup:
5649         for (i = 0; i < conf_vsis; i++) {
5650                 if (pf->vmdq[i].vsi == NULL)
5651                         break;
5652                 i40e_vsi_release(pf->vmdq[i].vsi);
5653         }
5654
5655         rte_free(pf->vmdq);
5656         pf->vmdq = NULL;
5657         i40e_pf_enable_irq0(hw);
5658         return err;
5659 }
5660
5661 static void
5662 i40e_stat_update_32(struct i40e_hw *hw,
5663                    uint32_t reg,
5664                    bool offset_loaded,
5665                    uint64_t *offset,
5666                    uint64_t *stat)
5667 {
5668         uint64_t new_data;
5669
5670         new_data = (uint64_t)I40E_READ_REG(hw, reg);
5671         if (!offset_loaded)
5672                 *offset = new_data;
5673
5674         if (new_data >= *offset)
5675                 *stat = (uint64_t)(new_data - *offset);
5676         else
5677                 *stat = (uint64_t)((new_data +
5678                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5679 }
5680
5681 static void
5682 i40e_stat_update_48(struct i40e_hw *hw,
5683                    uint32_t hireg,
5684                    uint32_t loreg,
5685                    bool offset_loaded,
5686                    uint64_t *offset,
5687                    uint64_t *stat)
5688 {
5689         uint64_t new_data;
5690
5691         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5692         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5693                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5694
5695         if (!offset_loaded)
5696                 *offset = new_data;
5697
5698         if (new_data >= *offset)
5699                 *stat = new_data - *offset;
5700         else
5701                 *stat = (uint64_t)((new_data +
5702                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5703
5704         *stat &= I40E_48_BIT_MASK;
5705 }
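/*
 * Worked example (editor's note): both helpers above handle counter
 * wrap-around.  In the 32-bit case, if the saved offset is 0xFFFFFF00 and
 * the register now reads 0x00000010, new_data < *offset, so the reported
 * delta is (0x00000010 + 2^32) - 0xFFFFFF00 = 0x110 = 272 increments since
 * the offset was captured.  The 48-bit variant does the same with 2^48 and
 * then masks the result to 48 bits.
 */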
5706
5707 /* Disable IRQ0 */
5708 void
5709 i40e_pf_disable_irq0(struct i40e_hw *hw)
5710 {
5711         /* Disable all interrupt types */
5712         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
5713         I40E_WRITE_FLUSH(hw);
5714 }
5715
5716 /* Enable IRQ0 */
5717 void
5718 i40e_pf_enable_irq0(struct i40e_hw *hw)
5719 {
5720         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
5721                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
5722                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
5723                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
5724         I40E_WRITE_FLUSH(hw);
5725 }
5726
5727 static void
5728 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
5729 {
5730         /* read pending request and disable first */
5731         i40e_pf_disable_irq0(hw);
5732         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
5733         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
5734                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
5735
5736         if (no_queue)
5737                 /* Do not link any queues to irq0 */
5738                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
5739                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
5740 }
5741
5742 static void
5743 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
5744 {
5745         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5746         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5747         int i;
5748         uint16_t abs_vf_id;
5749         uint32_t index, offset, val;
5750
5751         if (!pf->vfs)
5752                 return;
5753         /**
5754          * Try to find which VF triggered a reset; use the absolute VF id to
5755          * access it, since the register is a global one.
5756          */
5757         for (i = 0; i < pf->vf_num; i++) {
5758                 abs_vf_id = hw->func_caps.vf_base_id + i;
5759                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
5760                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
5761                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
5762                 /* VFR event occurred */
5763                 if (val & (0x1 << offset)) {
5764                         int ret;
5765
5766                         /* Clear the event first */
5767                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
5768                                                         (0x1 << offset));
5769                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
5770                         /**
5771                          * Only notify that a VF reset event occurred;
5772                          * don't trigger another SW reset
5773                          */
5774                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
5775                         if (ret != I40E_SUCCESS)
5776                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
5777                 }
5778         }
5779 }
5780
5781 static void
5782 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
5783 {
5784         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5785         int i;
5786
5787         for (i = 0; i < pf->vf_num; i++)
5788                 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
5789 }
5790
5791 static void
5792 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
5793 {
5794         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5795         struct i40e_arq_event_info info;
5796         uint16_t pending, opcode;
5797         int ret;
5798
5799         info.buf_len = I40E_AQ_BUF_SZ;
5800         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
5801         if (!info.msg_buf) {
5802                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
5803                 return;
5804         }
5805
5806         pending = 1;
5807         while (pending) {
5808                 ret = i40e_clean_arq_element(hw, &info, &pending);
5809
5810                 if (ret != I40E_SUCCESS) {
5811                         PMD_DRV_LOG(INFO,
5812                                 "Failed to read msg from AdminQ, aq_err: %u",
5813                                 hw->aq.asq_last_status);
5814                         break;
5815                 }
5816                 opcode = rte_le_to_cpu_16(info.desc.opcode);
5817
5818                 switch (opcode) {
5819                 case i40e_aqc_opc_send_msg_to_pf:
5820                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
5821                         i40e_pf_host_handle_vf_msg(dev,
5822                                         rte_le_to_cpu_16(info.desc.retval),
5823                                         rte_le_to_cpu_32(info.desc.cookie_high),
5824                                         rte_le_to_cpu_32(info.desc.cookie_low),
5825                                         info.msg_buf,
5826                                         info.msg_len);
5827                         break;
5828                 case i40e_aqc_opc_get_link_status:
5829                         ret = i40e_dev_link_update(dev, 0);
5830                         if (!ret)
5831                                 _rte_eth_dev_callback_process(dev,
5832                                         RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
5833                         break;
5834                 default:
5835                         PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
5836                                     opcode);
5837                         break;
5838                 }
5839         }
5840         rte_free(info.msg_buf);
5841 }
5842
5843 /**
5844  * Interrupt handler triggered by the NIC for handling
5845  * specific interrupts.
5846  *
5847  * @param handle
5848  *  Pointer to interrupt handle.
5849  * @param param
5850  *  The address of the parameter (struct rte_eth_dev *) registered before.
5851  *
5852  * @return
5853  *  void
5854  */
5855 static void
5856 i40e_dev_interrupt_handler(void *param)
5857 {
5858         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5859         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5860         uint32_t icr0;
5861
5862         /* Disable interrupt */
5863         i40e_pf_disable_irq0(hw);
5864
5865         /* read out interrupt causes */
5866         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5867
5868         /* No interrupt event indicated */
5869         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
5870                 PMD_DRV_LOG(INFO, "No interrupt event");
5871                 goto done;
5872         }
5873         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
5874                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
5875         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
5876                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
5877         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
5878                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
5879         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
5880                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
5881         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
5882                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
5883         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
5884                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
5885         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
5886                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
5887
5888         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
5889                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
5890                 i40e_dev_handle_vfr_event(dev);
5891         }
5892         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
5893                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
5894                 i40e_dev_handle_aq_msg(dev);
5895         }
5896
5897 done:
5898         /* Enable interrupt */
5899         i40e_pf_enable_irq0(hw);
5900         rte_intr_enable(dev->intr_handle);
5901 }
5902
5903 int
5904 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
5905                          struct i40e_macvlan_filter *filter,
5906                          int total)
5907 {
5908         int ele_num, ele_buff_size;
5909         int num, actual_num, i;
5910         uint16_t flags;
5911         int ret = I40E_SUCCESS;
5912         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5913         struct i40e_aqc_add_macvlan_element_data *req_list;
5914
5915         if (filter == NULL || total == 0)
5916                 return I40E_ERR_PARAM;
5917         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5918         ele_buff_size = hw->aq.asq_buf_size;
5919
5920         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5921         if (req_list == NULL) {
5922                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5923                 return I40E_ERR_NO_MEMORY;
5924         }
5925
5926         num = 0;
5927         do {
5928                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5929                 memset(req_list, 0, ele_buff_size);
5930
5931                 for (i = 0; i < actual_num; i++) {
5932                         (void)rte_memcpy(req_list[i].mac_addr,
5933                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5934                         req_list[i].vlan_tag =
5935                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5936
5937                         switch (filter[num + i].filter_type) {
5938                         case RTE_MAC_PERFECT_MATCH:
5939                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5940                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5941                                 break;
5942                         case RTE_MACVLAN_PERFECT_MATCH:
5943                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5944                                 break;
5945                         case RTE_MAC_HASH_MATCH:
5946                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5947                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5948                                 break;
5949                         case RTE_MACVLAN_HASH_MATCH:
5950                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5951                                 break;
5952                         default:
5953                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
5954                                 ret = I40E_ERR_PARAM;
5955                                 goto DONE;
5956                         }
5957
5958                         req_list[i].queue_number = 0;
5959
5960                         req_list[i].flags = rte_cpu_to_le_16(flags);
5961                 }
5962
5963                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
5964                                                 actual_num, NULL);
5965                 if (ret != I40E_SUCCESS) {
5966                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
5967                         goto DONE;
5968                 }
5969                 num += actual_num;
5970         } while (num < total);
5971
5972 DONE:
5973         rte_free(req_list);
5974         return ret;
5975 }
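/*
 * Illustrative numbers (editor's note; the buffer and element sizes below
 * are assumptions used only for the example): if the admin send queue
 * buffer were 4096 bytes and each add-macvlan element 16 bytes, ele_num
 * would be 256, so adding 600 filters would issue three AQ commands with
 * 256, 256 and 88 elements respectively.  The removal helper below uses the
 * same chunking scheme.
 */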
5976
5977 int
5978 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
5979                             struct i40e_macvlan_filter *filter,
5980                             int total)
5981 {
5982         int ele_num, ele_buff_size;
5983         int num, actual_num, i;
5984         uint16_t flags;
5985         int ret = I40E_SUCCESS;
5986         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5987         struct i40e_aqc_remove_macvlan_element_data *req_list;
5988
5989         if (filter == NULL || total == 0)
5990                 return I40E_ERR_PARAM;
5991
5992         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5993         ele_buff_size = hw->aq.asq_buf_size;
5994
5995         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
5996         if (req_list == NULL) {
5997                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5998                 return I40E_ERR_NO_MEMORY;
5999         }
6000
6001         num = 0;
6002         do {
6003                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6004                 memset(req_list, 0, ele_buff_size);
6005
6006                 for (i = 0; i < actual_num; i++) {
6007                         (void)rte_memcpy(req_list[i].mac_addr,
6008                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
6009                         req_list[i].vlan_tag =
6010                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
6011
6012                         switch (filter[num + i].filter_type) {
6013                         case RTE_MAC_PERFECT_MATCH:
6014                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6015                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6016                                 break;
6017                         case RTE_MACVLAN_PERFECT_MATCH:
6018                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6019                                 break;
6020                         case RTE_MAC_HASH_MATCH:
6021                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6022                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6023                                 break;
6024                         case RTE_MACVLAN_HASH_MATCH:
6025                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6026                                 break;
6027                         default:
6028                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6029                                 ret = I40E_ERR_PARAM;
6030                                 goto DONE;
6031                         }
6032                         req_list[i].flags = rte_cpu_to_le_16(flags);
6033                 }
6034
6035                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6036                                                 actual_num, NULL);
6037                 if (ret != I40E_SUCCESS) {
6038                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6039                         goto DONE;
6040                 }
6041                 num += actual_num;
6042         } while (num < total);
6043
6044 DONE:
6045         rte_free(req_list);
6046         return ret;
6047 }
6048
6049 /* Find a specific MAC filter */
6050 static struct i40e_mac_filter *
6051 i40e_find_mac_filter(struct i40e_vsi *vsi,
6052                          struct ether_addr *macaddr)
6053 {
6054         struct i40e_mac_filter *f;
6055
6056         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6057                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6058                         return f;
6059         }
6060
6061         return NULL;
6062 }
6063
6064 static bool
6065 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6066                          uint16_t vlan_id)
6067 {
6068         uint32_t vid_idx, vid_bit;
6069
6070         if (vlan_id > ETH_VLAN_ID_MAX)
6071                 return 0;
6072
6073         vid_idx = I40E_VFTA_IDX(vlan_id);
6074         vid_bit = I40E_VFTA_BIT(vlan_id);
6075
6076         if (vsi->vfta[vid_idx] & vid_bit)
6077                 return 1;
6078         else
6079                 return 0;
6080 }
6081
6082 static void
6083 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6084                        uint16_t vlan_id, bool on)
6085 {
6086         uint32_t vid_idx, vid_bit;
6087
6088         vid_idx = I40E_VFTA_IDX(vlan_id);
6089         vid_bit = I40E_VFTA_BIT(vlan_id);
6090
6091         if (on)
6092                 vsi->vfta[vid_idx] |= vid_bit;
6093         else
6094                 vsi->vfta[vid_idx] &= ~vid_bit;
6095 }
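/*
 * Worked example (editor's note): the VFTA is a bitmap holding 32 VLAN ids
 * per uint32_t word.  Assuming the usual I40E_VFTA_IDX(v) = v / 32 and
 * I40E_VFTA_BIT(v) = 1 << (v % 32) definitions, VLAN id 100 lands in word 3
 * (100 / 32) at bit 4 (100 % 32), i.e. vsi->vfta[3] |= (1 << 4) when the
 * filter is turned on.
 */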
6096
6097 void
6098 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6099                      uint16_t vlan_id, bool on)
6100 {
6101         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6102         struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6103         int ret;
6104
6105         if (vlan_id > ETH_VLAN_ID_MAX)
6106                 return;
6107
6108         i40e_store_vlan_filter(vsi, vlan_id, on);
6109
6110         if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6111                 return;
6112
6113         vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6114
6115         if (on) {
6116                 ret = i40e_aq_add_vlan(hw, vsi->seid,
6117                                        &vlan_data, 1, NULL);
6118                 if (ret != I40E_SUCCESS)
6119                         PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6120         } else {
6121                 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6122                                           &vlan_data, 1, NULL);
6123                 if (ret != I40E_SUCCESS)
6124                         PMD_DRV_LOG(ERR,
6125                                     "Failed to remove vlan filter");
6126         }
6127 }
6128
6129 /**
6130  * Find all vlan options for a specific mac addr;
6131  * return the actual vlans found.
6132  */
6133 int
6134 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6135                            struct i40e_macvlan_filter *mv_f,
6136                            int num, struct ether_addr *addr)
6137 {
6138         int i;
6139         uint32_t j, k;
6140
6141         /**
6142          * i40e_find_vlan_filter is deliberately not used here, to keep the
6143          * loop time down, even though the code looks more complex.
6144          */
6145         if (num < vsi->vlan_num)
6146                 return I40E_ERR_PARAM;
6147
6148         i = 0;
6149         for (j = 0; j < I40E_VFTA_SIZE; j++) {
6150                 if (vsi->vfta[j]) {
6151                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6152                                 if (vsi->vfta[j] & (1 << k)) {
6153                                         if (i > num - 1) {
6154                                                 PMD_DRV_LOG(ERR,
6155                                                         "vlan number doesn't match");
6156                                                 return I40E_ERR_PARAM;
6157                                         }
6158                                         (void)rte_memcpy(&mv_f[i].macaddr,
6159                                                         addr, ETH_ADDR_LEN);
6160                                         mv_f[i].vlan_id =
6161                                                 j * I40E_UINT32_BIT_SIZE + k;
6162                                         i++;
6163                                 }
6164                         }
6165                 }
6166         }
6167         return I40E_SUCCESS;
6168 }
6169
6170 static inline int
6171 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6172                            struct i40e_macvlan_filter *mv_f,
6173                            int num,
6174                            uint16_t vlan)
6175 {
6176         int i = 0;
6177         struct i40e_mac_filter *f;
6178
6179         if (num < vsi->mac_num)
6180                 return I40E_ERR_PARAM;
6181
6182         TAILQ_FOREACH(f, &vsi->mac_list, next) {
6183                 if (i > num - 1) {
6184                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
6185                         return I40E_ERR_PARAM;
6186                 }
6187                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6188                                 ETH_ADDR_LEN);
6189                 mv_f[i].vlan_id = vlan;
6190                 mv_f[i].filter_type = f->mac_info.filter_type;
6191                 i++;
6192         }
6193
6194         return I40E_SUCCESS;
6195 }
6196
6197 static int
6198 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6199 {
6200         int i, j, num;
6201         struct i40e_mac_filter *f;
6202         struct i40e_macvlan_filter *mv_f;
6203         int ret = I40E_SUCCESS;
6204
6205         if (vsi == NULL || vsi->mac_num == 0)
6206                 return I40E_ERR_PARAM;
6207
6208         /* Case that no vlan is set */
6209         if (vsi->vlan_num == 0)
6210                 num = vsi->mac_num;
6211         else
6212                 num = vsi->mac_num * vsi->vlan_num;
6213
6214         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6215         if (mv_f == NULL) {
6216                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6217                 return I40E_ERR_NO_MEMORY;
6218         }
6219
6220         i = 0;
6221         if (vsi->vlan_num == 0) {
6222                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6223                         (void)rte_memcpy(&mv_f[i].macaddr,
6224                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6225                         mv_f[i].filter_type = f->mac_info.filter_type;
6226                         mv_f[i].vlan_id = 0;
6227                         i++;
6228                 }
6229         } else {
6230                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6231                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6232                                         vsi->vlan_num, &f->mac_info.mac_addr);
6233                         if (ret != I40E_SUCCESS)
6234                                 goto DONE;
6235                         for (j = i; j < i + vsi->vlan_num; j++)
6236                                 mv_f[j].filter_type = f->mac_info.filter_type;
6237                         i += vsi->vlan_num;
6238                 }
6239         }
6240
6241         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6242 DONE:
6243         rte_free(mv_f);
6244
6245         return ret;
6246 }
6247
6248 int
6249 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6250 {
6251         struct i40e_macvlan_filter *mv_f;
6252         int mac_num;
6253         int ret = I40E_SUCCESS;
6254
6255         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6256                 return I40E_ERR_PARAM;
6257
6258         /* If it's already set, just return */
6259         if (i40e_find_vlan_filter(vsi, vlan))
6260                 return I40E_SUCCESS;
6261
6262         mac_num = vsi->mac_num;
6263
6264         if (mac_num == 0) {
6265                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6266                 return I40E_ERR_PARAM;
6267         }
6268
6269         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6270
6271         if (mv_f == NULL) {
6272                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6273                 return I40E_ERR_NO_MEMORY;
6274         }
6275
6276         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6277
6278         if (ret != I40E_SUCCESS)
6279                 goto DONE;
6280
6281         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6282
6283         if (ret != I40E_SUCCESS)
6284                 goto DONE;
6285
6286         i40e_set_vlan_filter(vsi, vlan, 1);
6287
6288         vsi->vlan_num++;
6289         ret = I40E_SUCCESS;
6290 DONE:
6291         rte_free(mv_f);
6292         return ret;
6293 }
6294
6295 int
6296 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6297 {
6298         struct i40e_macvlan_filter *mv_f;
6299         int mac_num;
6300         int ret = I40E_SUCCESS;
6301
6302         /**
6303          * Vlan 0 is the generic filter for untagged packets
6304          * and can't be removed.
6305          */
6306         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6307                 return I40E_ERR_PARAM;
6308
6309         /* If it can't be found, just return */
6310         if (!i40e_find_vlan_filter(vsi, vlan))
6311                 return I40E_ERR_PARAM;
6312
6313         mac_num = vsi->mac_num;
6314
6315         if (mac_num == 0) {
6316                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6317                 return I40E_ERR_PARAM;
6318         }
6319
6320         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6321
6322         if (mv_f == NULL) {
6323                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6324                 return I40E_ERR_NO_MEMORY;
6325         }
6326
6327         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6328
6329         if (ret != I40E_SUCCESS)
6330                 goto DONE;
6331
6332         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6333
6334         if (ret != I40E_SUCCESS)
6335                 goto DONE;
6336
6337         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6338         if (vsi->vlan_num == 1) {
6339                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6340                 if (ret != I40E_SUCCESS)
6341                         goto DONE;
6342
6343                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6344                 if (ret != I40E_SUCCESS)
6345                         goto DONE;
6346         }
6347
6348         i40e_set_vlan_filter(vsi, vlan, 0);
6349
6350         vsi->vlan_num--;
6351         ret = I40E_SUCCESS;
6352 DONE:
6353         rte_free(mv_f);
6354         return ret;
6355 }
6356
6357 int
6358 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6359 {
6360         struct i40e_mac_filter *f;
6361         struct i40e_macvlan_filter *mv_f;
6362         int i, vlan_num = 0;
6363         int ret = I40E_SUCCESS;
6364
6365         /* If the filter has already been configured, just return */
6366         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6367         if (f != NULL)
6368                 return I40E_SUCCESS;
6369         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6370                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6371
6372                 /**
6373                  * If vlan_num is 0, this is the first time to add a mac;
6374                  * set the filter for vlan_id 0.
6375                  */
6376                 if (vsi->vlan_num == 0) {
6377                         i40e_set_vlan_filter(vsi, 0, 1);
6378                         vsi->vlan_num = 1;
6379                 }
6380                 vlan_num = vsi->vlan_num;
6381         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6382                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6383                 vlan_num = 1;
6384
6385         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6386         if (mv_f == NULL) {
6387                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6388                 return I40E_ERR_NO_MEMORY;
6389         }
6390
6391         for (i = 0; i < vlan_num; i++) {
6392                 mv_f[i].filter_type = mac_filter->filter_type;
6393                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6394                                 ETH_ADDR_LEN);
6395         }
6396
6397         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6398                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6399                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6400                                         &mac_filter->mac_addr);
6401                 if (ret != I40E_SUCCESS)
6402                         goto DONE;
6403         }
6404
6405         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6406         if (ret != I40E_SUCCESS)
6407                 goto DONE;
6408
6409         /* Add the mac addr to the mac list */
6410         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6411         if (f == NULL) {
6412                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6413                 ret = I40E_ERR_NO_MEMORY;
6414                 goto DONE;
6415         }
6416         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6417                         ETH_ADDR_LEN);
6418         f->mac_info.filter_type = mac_filter->filter_type;
6419         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6420         vsi->mac_num++;
6421
6422         ret = I40E_SUCCESS;
6423 DONE:
6424         rte_free(mv_f);
6425
6426         return ret;
6427 }
6428
6429 int
6430 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6431 {
6432         struct i40e_mac_filter *f;
6433         struct i40e_macvlan_filter *mv_f;
6434         int i, vlan_num;
6435         enum rte_mac_filter_type filter_type;
6436         int ret = I40E_SUCCESS;
6437
6438         /* Can't find it, return an error */
6439         f = i40e_find_mac_filter(vsi, addr);
6440         if (f == NULL)
6441                 return I40E_ERR_PARAM;
6442
6443         vlan_num = vsi->vlan_num;
6444         filter_type = f->mac_info.filter_type;
6445         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6446                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6447                 if (vlan_num == 0) {
6448                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
6449                         return I40E_ERR_PARAM;
6450                 }
6451         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6452                         filter_type == RTE_MAC_HASH_MATCH)
6453                 vlan_num = 1;
6454
6455         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6456         if (mv_f == NULL) {
6457                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6458                 return I40E_ERR_NO_MEMORY;
6459         }
6460
6461         for (i = 0; i < vlan_num; i++) {
6462                 mv_f[i].filter_type = filter_type;
6463                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6464                                 ETH_ADDR_LEN);
6465         }
6466         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6467                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6468                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6469                 if (ret != I40E_SUCCESS)
6470                         goto DONE;
6471         }
6472
6473         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6474         if (ret != I40E_SUCCESS)
6475                 goto DONE;
6476
6477         /* Remove the mac addr from the mac list */
6478         TAILQ_REMOVE(&vsi->mac_list, f, next);
6479         rte_free(f);
6480         vsi->mac_num--;
6481
6482         ret = I40E_SUCCESS;
6483 DONE:
6484         rte_free(mv_f);
6485         return ret;
6486 }
6487
6488 /* Configure hash enable flags for RSS */
6489 uint64_t
6490 i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
6491 {
6492         uint64_t hena = 0;
6493
6494         if (!flags)
6495                 return hena;
6496
6497         if (flags & ETH_RSS_FRAG_IPV4)
6498                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
6499         if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
6500                 if (type == I40E_MAC_X722) {
6501                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
6502                          (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
6503                 } else
6504                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
6505         }
6506         if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
6507                 if (type == I40E_MAC_X722) {
6508                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
6509                          (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
6510                          (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
6511                 } else
6512                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
6513         }
6514         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
6515                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
6516         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
6517                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
6518         if (flags & ETH_RSS_FRAG_IPV6)
6519                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
6520         if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
6521                 if (type == I40E_MAC_X722) {
6522                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
6523                          (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
6524                 } else
6525                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
6526         }
6527         if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
6528                 if (type == I40E_MAC_X722) {
6529                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
6530                          (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
6531                          (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
6532                 } else
6533                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
6534         }
6535         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
6536                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
6537         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
6538                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
6539         if (flags & ETH_RSS_L2_PAYLOAD)
6540                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
6541
6542         return hena;
6543 }
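/*
 * Example (editor's note, not part of the driver): on a non-X722 MAC, a
 * request of
 *     flags = ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP;
 * yields
 *     hena = (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) |
 *            (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 * i40e_parse_hena() below performs the reverse mapping, so for any supported
 * combination i40e_parse_hena(i40e_config_hena(flags, type)) should return
 * the original flags.
 */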
6544
6545 /* Parse the hash enable flags */
6546 uint64_t
6547 i40e_parse_hena(uint64_t flags)
6548 {
6549         uint64_t rss_hf = 0;
6550
6551         if (!flags)
6552                 return rss_hf;
6553         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
6554                 rss_hf |= ETH_RSS_FRAG_IPV4;
6555         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
6556                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6557         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
6558                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6559         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
6560                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6561         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
6562                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6563         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
6564                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6565         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
6566                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
6567         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
6568                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
6569         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
6570                 rss_hf |= ETH_RSS_FRAG_IPV6;
6571         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
6572                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6573         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
6574                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6575         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
6576                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6577         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
6578                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6579         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
6580                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6581         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
6582                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
6583         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
6584                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
6585         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
6586                 rss_hf |= ETH_RSS_L2_PAYLOAD;
6587
6588         return rss_hf;
6589 }
6590
6591 /* Disable RSS */
6592 static void
6593 i40e_pf_disable_rss(struct i40e_pf *pf)
6594 {
6595         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6596         uint64_t hena;
6597
6598         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6599         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6600         if (hw->mac.type == I40E_MAC_X722)
6601                 hena &= ~I40E_RSS_HENA_ALL_X722;
6602         else
6603                 hena &= ~I40E_RSS_HENA_ALL;
6604         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6605         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6606         I40E_WRITE_FLUSH(hw);
6607 }
6608
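/* Set the RSS hash key of a VSI, via the admin queue when supported or by writing the PFQF_HKEY registers directly */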
6609 static int
6610 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6611 {
6612         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6613         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6614         int ret = 0;
6615
6616         if (!key || key_len == 0) {
6617                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6618                 return 0;
6619         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6620                 sizeof(uint32_t)) {
6621                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6622                 return -EINVAL;
6623         }
6624
6625         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6626                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6627                         (struct i40e_aqc_get_set_rss_key_data *)key;
6628
6629                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6630                 if (ret)
6631                         PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
6632         } else {
6633                 uint32_t *hash_key = (uint32_t *)key;
6634                 uint16_t i;
6635
6636                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6637                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
6638                 I40E_WRITE_FLUSH(hw);
6639         }
6640
6641         return ret;
6642 }
6643
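/* Read back the RSS hash key of a VSI and report its length */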
6644 static int
6645 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6646 {
6647         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6648         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6649         int ret;
6650
6651         if (!key || !key_len)
6652                 return -EINVAL;
6653
6654         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6655                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6656                         (struct i40e_aqc_get_set_rss_key_data *)key);
6657                 if (ret) {
6658                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6659                         return ret;
6660                 }
6661         } else {
6662                 uint32_t *key_dw = (uint32_t *)key;
6663                 uint16_t i;
6664
6665                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6666                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6667         }
6668         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
6669
6670         return 0;
6671 }
6672
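/* Program the RSS key and the hash enable (HENA) bits from the given RSS configuration */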
6673 static int
6674 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6675 {
6676         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6677         uint64_t rss_hf;
6678         uint64_t hena;
6679         int ret;
6680
6681         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6682                                rss_conf->rss_key_len);
6683         if (ret)
6684                 return ret;
6685
6686         rss_hf = rss_conf->rss_hf;
6687         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6688         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6689         if (hw->mac.type == I40E_MAC_X722)
6690                 hena &= ~I40E_RSS_HENA_ALL_X722;
6691         else
6692                 hena &= ~I40E_RSS_HENA_ALL;
6693         hena |= i40e_config_hena(rss_hf, hw->mac.type);
6694         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6695         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6696         I40E_WRITE_FLUSH(hw);
6697
6698         return 0;
6699 }
6700
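/* Update the RSS key and hash types; toggling RSS on or off at runtime is rejected */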
6701 static int
6702 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6703                          struct rte_eth_rss_conf *rss_conf)
6704 {
6705         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6706         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6707         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
6708         uint64_t hena;
6709
6710         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6711         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6712         if (!(hena & ((hw->mac.type == I40E_MAC_X722)
6713                  ? I40E_RSS_HENA_ALL_X722
6714                  : I40E_RSS_HENA_ALL))) { /* RSS disabled */
6715                 if (rss_hf != 0) /* Enable RSS */
6716                         return -EINVAL;
6717                 return 0; /* Nothing to do */
6718         }
6719         /* RSS enabled */
6720         if (rss_hf == 0) /* Disable RSS */
6721                 return -EINVAL;
6722
6723         return i40e_hw_rss_hash_set(pf, rss_conf);
6724 }
6725
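/* Report the current RSS key and the enabled hash types */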
6726 static int
6727 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6728                            struct rte_eth_rss_conf *rss_conf)
6729 {
6730         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6731         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6732         uint64_t hena;
6733
6734         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6735                          &rss_conf->rss_key_len);
6736
6737         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6738         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6739         rss_conf->rss_hf = i40e_parse_hena(hena);
6740
6741         return 0;
6742 }
6743
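/* Translate an ethdev tunnel filter type into the AQ cloud filter flags */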
6744 static int
6745 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6746 {
6747         switch (filter_type) {
6748         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6749                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6750                 break;
6751         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6752                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6753                 break;
6754         case RTE_TUNNEL_FILTER_IMAC_TENID:
6755                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6756                 break;
6757         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6758                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6759                 break;
6760         case ETH_TUNNEL_FILTER_IMAC:
6761                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6762                 break;
6763         case ETH_TUNNEL_FILTER_OIP:
6764                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
6765                 break;
6766         case ETH_TUNNEL_FILTER_IIP:
6767                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
6768                 break;
6769         default:
6770                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
6771                 return -EINVAL;
6772         }
6773
6774         return 0;
6775 }
6776
6777 /* Convert tunnel filter structure */
6778 static int
6779 i40e_tunnel_filter_convert(
6780         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
6781         struct i40e_tunnel_filter *tunnel_filter)
6782 {
6783         ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
6784                         (struct ether_addr *)&tunnel_filter->input.outer_mac);
6785         ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
6786                         (struct ether_addr *)&tunnel_filter->input.inner_mac);
6787         tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
6788         if ((rte_le_to_cpu_16(cld_filter->element.flags) &
6789              I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
6790             I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
6791                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
6792         else
6793                 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
6794         tunnel_filter->input.flags = cld_filter->element.flags;
6795         tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
6796         tunnel_filter->queue = cld_filter->element.queue_number;
6797         rte_memcpy(tunnel_filter->input.general_fields,
6798                    cld_filter->general_fields,
6799                    sizeof(cld_filter->general_fields));
6800
6801         return 0;
6802 }
6803
6804 /* Check if the tunnel filter already exists */
6805 struct i40e_tunnel_filter *
6806 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
6807                              const struct i40e_tunnel_filter_input *input)
6808 {
6809         int ret;
6810
6811         ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
6812         if (ret < 0)
6813                 return NULL;
6814
6815         return tunnel_rule->hash_map[ret];
6816 }
6817
6818 /* Add a tunnel filter into the SW list */
6819 static int
6820 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
6821                              struct i40e_tunnel_filter *tunnel_filter)
6822 {
6823         struct i40e_tunnel_rule *rule = &pf->tunnel;
6824         int ret;
6825
6826         ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
6827         if (ret < 0) {
6828                 PMD_DRV_LOG(ERR,
6829                             "Failed to insert tunnel filter into hash table, error %d!",
6830                             ret);
6831                 return ret;
6832         }
6833         rule->hash_map[ret] = tunnel_filter;
6834
6835         TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
6836
6837         return 0;
6838 }
6839
6840 /* Delete a tunnel filter from the SW list */
6841 int
6842 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
6843                           struct i40e_tunnel_filter_input *input)
6844 {
6845         struct i40e_tunnel_rule *rule = &pf->tunnel;
6846         struct i40e_tunnel_filter *tunnel_filter;
6847         int ret;
6848
6849         ret = rte_hash_del_key(rule->hash_table, input);
6850         if (ret < 0) {
6851                 PMD_DRV_LOG(ERR,
6852                             "Failed to delete tunnel filter from hash table, error %d!",
6853                             ret);
6854                 return ret;
6855         }
6856         tunnel_filter = rule->hash_map[ret];
6857         rule->hash_map[ret] = NULL;
6858
6859         TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
6860         rte_free(tunnel_filter);
6861
6862         return 0;
6863 }
6864
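/* Add or delete a cloud (tunnel) filter via the admin queue and keep the SW tunnel filter list in sync */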
6865 int
6866 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
6867                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
6868                         uint8_t add)
6869 {
6870         uint16_t ip_type;
6871         uint32_t ipv4_addr;
6872         uint8_t i, tun_type = 0;
6873         /* internal variable to convert ipv6 byte order */
6874         uint32_t convert_ipv6[4];
6875         int val, ret = 0;
6876         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6877         struct i40e_vsi *vsi = pf->main_vsi;
6878         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
6879         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
6880         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
6881         struct i40e_tunnel_filter *tunnel, *node;
6882         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
6883
6884         cld_filter = rte_zmalloc("tunnel_filter",
6885                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
6886                          0);
6887
6888         if (cld_filter == NULL) {
6889                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
6890                 return -ENOMEM;
6891         }
6892         pfilter = cld_filter;
6893
6894         ether_addr_copy(&tunnel_filter->outer_mac,
6895                         (struct ether_addr *)&pfilter->element.outer_mac);
6896         ether_addr_copy(&tunnel_filter->inner_mac,
6897                         (struct ether_addr *)&pfilter->element.inner_mac);
6898
6899         pfilter->element.inner_vlan =
6900                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
6901         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
6902                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
6903                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
6904                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
6905                                 &rte_cpu_to_le_32(ipv4_addr),
6906                                 sizeof(pfilter->element.ipaddr.v4.data));
6907         } else {
6908                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
6909                 for (i = 0; i < 4; i++) {
6910                         convert_ipv6[i] =
6911                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
6912                 }
6913                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
6914                            &convert_ipv6,
6915                            sizeof(pfilter->element.ipaddr.v6.data));
6916         }
6917
6918         /* check tunneled type */
6919         switch (tunnel_filter->tunnel_type) {
6920         case RTE_TUNNEL_TYPE_VXLAN:
6921                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
6922                 break;
6923         case RTE_TUNNEL_TYPE_NVGRE:
6924                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
6925                 break;
6926         case RTE_TUNNEL_TYPE_IP_IN_GRE:
6927                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
6928                 break;
6929         default:
6930                 /* Other tunnel types are not supported. */
6931                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
6932                 rte_free(cld_filter);
6933                 return -EINVAL;
6934         }
6935
6936         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
6937                                        &pfilter->element.flags);
6938         if (val < 0) {
6939                 rte_free(cld_filter);
6940                 return -EINVAL;
6941         }
6942
6943         pfilter->element.flags |= rte_cpu_to_le_16(
6944                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
6945                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
6946         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
6947         pfilter->element.queue_number =
6948                 rte_cpu_to_le_16(tunnel_filter->queue_id);
6949
6950         /* Check if the filter already exists in the SW list */
6951         memset(&check_filter, 0, sizeof(check_filter));
6952         i40e_tunnel_filter_convert(cld_filter, &check_filter);
6953         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
6954         if (add && node) {
6955                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                     rte_free(cld_filter);
6956                 return -EINVAL;
6957         }
6958
6959         if (!add && !node) {
6960                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                     rte_free(cld_filter);
6961                 return -EINVAL;
6962         }
6963
6964         if (add) {
6965                 ret = i40e_aq_add_cloud_filters(hw,
6966                                         vsi->seid, &cld_filter->element, 1);
6967                 if (ret < 0) {
6968                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                             rte_free(cld_filter);
6969                         return -ENOTSUP;
6970                 }
6971                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
6972                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
6973                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
6974         } else {
6975                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
6976                                                    &cld_filter->element, 1);
6977                 if (ret < 0) {
6978                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                             rte_free(cld_filter);
6979                         return -ENOTSUP;
6980                 }
6981                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
6982         }
6983
6984         rte_free(cld_filter);
6985         return ret;
6986 }
6987
6988 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
6989 #define I40E_TR_VXLAN_GRE_KEY_MASK              0x4
6990 #define I40E_TR_GENEVE_KEY_MASK                 0x8
6991 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK         0x40
6992 #define I40E_TR_GRE_KEY_MASK                    0x400
6993 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK          0x800
6994 #define I40E_TR_GRE_NO_KEY_MASK                 0x8000
6995
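/* Replace the default IMAC L1 filter with a TEID based L1 filter used for MPLS tunnel matching */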
6996 static enum
6997 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
6998 {
6999         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7000         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7001         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7002         enum i40e_status_code status = I40E_SUCCESS;
7003
7004         memset(&filter_replace, 0,
7005                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7006         memset(&filter_replace_buf, 0,
7007                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7008
7009         /* create L1 filter */
7010         filter_replace.old_filter_type =
7011                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7012         filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
7013         filter_replace.tr_bit = 0;
7014
7015         /* Prepare the buffer, 3 entries */
7016         filter_replace_buf.data[0] =
7017                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7018         filter_replace_buf.data[0] |=
7019                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7020         filter_replace_buf.data[2] = 0xFF;
7021         filter_replace_buf.data[3] = 0xFF;
7022         filter_replace_buf.data[4] =
7023                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7024         filter_replace_buf.data[4] |=
7025                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7026         filter_replace_buf.data[7] = 0xF0;
7027         filter_replace_buf.data[8]
7028                 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7029         filter_replace_buf.data[8] |=
7030                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7031         filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7032                 I40E_TR_GENEVE_KEY_MASK |
7033                 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7034         filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7035                 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7036                 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7037
7038         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7039                                                &filter_replace_buf);
7040         return status;
7041 }
7042
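/* Replace the IIP and IMAC cloud filters with MPLSoUDP and MPLSoGRE TEID cloud filters */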
7043 static enum
7044 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7045 {
7046         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7047         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7048         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7049         enum i40e_status_code status = I40E_SUCCESS;
7050
7051         /* For MPLSoUDP */
7052         memset(&filter_replace, 0,
7053                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7054         memset(&filter_replace_buf, 0,
7055                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7056         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7057                 I40E_AQC_MIRROR_CLOUD_FILTER;
7058         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7059         filter_replace.new_filter_type =
7060                 I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
7061         /* Prepare the buffer, 2 entries */
7062         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7063         filter_replace_buf.data[0] |=
7064                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7065         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
7066         filter_replace_buf.data[4] |=
7067                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7068         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7069                                                &filter_replace_buf);
7070         if (status < 0)
7071                 return status;
7072
7073         /* For MPLSoGRE */
7074         memset(&filter_replace, 0,
7075                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7076         memset(&filter_replace_buf, 0,
7077                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7078
7079         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7080                 I40E_AQC_MIRROR_CLOUD_FILTER;
7081         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7082         filter_replace.new_filter_type =
7083                 I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
7084         /* Prepare the buffer, 2 entries */
7085         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7086         filter_replace_buf.data[0] |=
7087                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7088         filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
7089         filter_replace_buf.data[4] |=
7090                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7091
7092         status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7093                                                &filter_replace_buf);
7094         return status;
7095 }
7096
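/* Add or delete a tunnel filter described by i40e_tunnel_filter_conf; supports VXLAN, NVGRE, IP in GRE, MPLSoUDP, MPLSoGRE and QinQ, and can target either the main VSI or a VF VSI */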
7097 int
7098 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7099                       struct i40e_tunnel_filter_conf *tunnel_filter,
7100                       uint8_t add)
7101 {
7102         uint16_t ip_type;
7103         uint32_t ipv4_addr;
7104         uint8_t i, tun_type = 0;
7105         /* internal variable to convert ipv6 byte order */
7106         uint32_t convert_ipv6[4];
7107         int val, ret = 0;
7108         struct i40e_pf_vf *vf = NULL;
7109         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7110         struct i40e_vsi *vsi;
7111         struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7112         struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7113         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7114         struct i40e_tunnel_filter *tunnel, *node;
7115         struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7116         uint32_t teid_le;
7117         bool big_buffer = 0;
7118
7119         cld_filter = rte_zmalloc("tunnel_filter",
7120                          sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7121                          0);
7122
7123         if (cld_filter == NULL) {
7124                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7125                 return -ENOMEM;
7126         }
7127         pfilter = cld_filter;
7128
7129         ether_addr_copy(&tunnel_filter->outer_mac,
7130                         (struct ether_addr *)&pfilter->element.outer_mac);
7131         ether_addr_copy(&tunnel_filter->inner_mac,
7132                         (struct ether_addr *)&pfilter->element.inner_mac);
7133
7134         pfilter->element.inner_vlan =
7135                 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7136         if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7137                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7138                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7139                 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7140                                 &rte_cpu_to_le_32(ipv4_addr),
7141                                 sizeof(pfilter->element.ipaddr.v4.data));
7142         } else {
7143                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7144                 for (i = 0; i < 4; i++) {
7145                         convert_ipv6[i] =
7146                         rte_cpu_to_le_32(rte_be_to_cpu_32(
7147                                          tunnel_filter->ip_addr.ipv6_addr[i]));
7148                 }
7149                 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7150                            &convert_ipv6,
7151                            sizeof(pfilter->element.ipaddr.v6.data));
7152         }
7153
7154         /* check tunneled type */
7155         switch (tunnel_filter->tunnel_type) {
7156         case I40E_TUNNEL_TYPE_VXLAN:
7157                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7158                 break;
7159         case I40E_TUNNEL_TYPE_NVGRE:
7160                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7161                 break;
7162         case I40E_TUNNEL_TYPE_IP_IN_GRE:
7163                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7164                 break;
7165         case I40E_TUNNEL_TYPE_MPLSoUDP:
7166                 if (!pf->mpls_replace_flag) {
7167                         i40e_replace_mpls_l1_filter(pf);
7168                         i40e_replace_mpls_cloud_filter(pf);
7169                         pf->mpls_replace_flag = 1;
7170                 }
7171                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7172                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7173                         teid_le >> 4;
7174                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7175                         (teid_le & 0xF) << 12;
7176                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7177                         0x40;
7178                 big_buffer = 1;
7179                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
7180                 break;
7181         case I40E_TUNNEL_TYPE_MPLSoGRE:
7182                 if (!pf->mpls_replace_flag) {
7183                         i40e_replace_mpls_l1_filter(pf);
7184                         i40e_replace_mpls_cloud_filter(pf);
7185                         pf->mpls_replace_flag = 1;
7186                 }
7187                 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7188                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7189                         teid_le >> 4;
7190                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7191                         (teid_le & 0xF) << 12;
7192                 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7193                         0x0;
7194                 big_buffer = 1;
7195                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
7196                 break;
7197         case I40E_TUNNEL_TYPE_QINQ:
7198                 if (!pf->qinq_replace_flag) {
7199                         ret = i40e_cloud_filter_qinq_create(pf);
7200                         if (ret < 0)
7201                                 PMD_DRV_LOG(DEBUG,
7202                                             "QinQ tunnel filter already created.");
7203                         pf->qinq_replace_flag = 1;
7204                 }
7205                 /*      Add the values of the outer and inner VLAN
7206                  *      to the general fields.
7207                  *      The big buffer must be used; see the changes in
7208                  *      i40e_aq_add_cloud_filters().
7209                  */
7210                 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
7211                 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
7212                 big_buffer = 1;
7213                 break;
7214         default:
7215                 /* Other tunnel types are not supported. */
7216                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7217                 rte_free(cld_filter);
7218                 return -EINVAL;
7219         }
7220
7221         if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
7222                 pfilter->element.flags =
7223                         I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
7224         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
7225                 pfilter->element.flags =
7226                         I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
7227         else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
7228                 pfilter->element.flags |=
7229                         I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
7230         else {
7231                 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7232                                                 &pfilter->element.flags);
7233                 if (val < 0) {
7234                         rte_free(cld_filter);
7235                         return -EINVAL;
7236                 }
7237         }
7238
7239         pfilter->element.flags |= rte_cpu_to_le_16(
7240                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7241                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7242         pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7243         pfilter->element.queue_number =
7244                 rte_cpu_to_le_16(tunnel_filter->queue_id);
7245
7246         if (!tunnel_filter->is_to_vf)
7247                 vsi = pf->main_vsi;
7248         else {
7249                 if (tunnel_filter->vf_id >= pf->vf_num) {
7250                         PMD_DRV_LOG(ERR, "Invalid VF ID.");
                             rte_free(cld_filter);
7251                         return -EINVAL;
7252                 }
7253                 vf = &pf->vfs[tunnel_filter->vf_id];
7254                 vsi = vf->vsi;
7255         }
7256
7257         /* Check if the filter already exists in the SW list */
7258         memset(&check_filter, 0, sizeof(check_filter));
7259         i40e_tunnel_filter_convert(cld_filter, &check_filter);
7260         check_filter.is_to_vf = tunnel_filter->is_to_vf;
7261         check_filter.vf_id = tunnel_filter->vf_id;
7262         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7263         if (add && node) {
7264                 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
                     rte_free(cld_filter);
7265                 return -EINVAL;
7266         }
7267
7268         if (!add && !node) {
7269                 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
                     rte_free(cld_filter);
7270                 return -EINVAL;
7271         }
7272
7273         if (add) {
7274                 if (big_buffer)
7275                         ret = i40e_aq_add_cloud_filters_big_buffer(hw,
7276                                                    vsi->seid, cld_filter, 1);
7277                 else
7278                         ret = i40e_aq_add_cloud_filters(hw,
7279                                         vsi->seid, &cld_filter->element, 1);
7280                 if (ret < 0) {
7281                         PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
                             rte_free(cld_filter);
7282                         return -ENOTSUP;
7283                 }
7284                 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7285                 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7286                 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7287         } else {
7288                 if (big_buffer)
7289                         ret = i40e_aq_remove_cloud_filters_big_buffer(
7290                                 hw, vsi->seid, cld_filter, 1);
7291                 else
7292                         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7293                                                    &cld_filter->element, 1);
7294                 if (ret < 0) {
7295                         PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
                             rte_free(cld_filter);
7296                         return -ENOTSUP;
7297                 }
7298                 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7299         }
7300
7301         rte_free(cld_filter);
7302         return ret;
7303 }
7304
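/* Return the index of the given VXLAN UDP port in the PF port table, or -1 if it is not present */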
7305 static int
7306 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
7307 {
7308         uint8_t i;
7309
7310         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7311                 if (pf->vxlan_ports[i] == port)
7312                         return i;
7313         }
7314
7315         return -1;
7316 }
7317
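/* Offload a new VXLAN UDP port to the hardware and record it in the PF port table */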
7318 static int
7319 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
7320 {
7321         int  idx, ret;
7322         uint8_t filter_idx;
7323         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7324
7325         idx = i40e_get_vxlan_port_idx(pf, port);
7326
7327         /* Check if port already exists */
7328         if (idx >= 0) {
7329                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
7330                 return -EINVAL;
7331         }
7332
7333         /* Now check if there is space to add the new port */
7334         idx = i40e_get_vxlan_port_idx(pf, 0);
7335         if (idx < 0) {
7336                 PMD_DRV_LOG(ERR,
7337                         "Maximum number of UDP ports reached, not adding port %d",
7338                         port);
7339                 return -ENOSPC;
7340         }
7341
7342         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
7343                                         &filter_idx, NULL);
7344         if (ret < 0) {
7345                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
7346                 return -1;
7347         }
7348
7349         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
7350                          port, filter_idx);
7351
7352         /* New port: add it and mark its index in the bitmap */
7353         pf->vxlan_ports[idx] = port;
7354         pf->vxlan_bitmap |= (1 << idx);
7355
7356         if (!(pf->flags & I40E_FLAG_VXLAN))
7357                 pf->flags |= I40E_FLAG_VXLAN;
7358
7359         return 0;
7360 }
7361
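/* Remove a VXLAN UDP port from the hardware and clear it from the PF port table */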
7362 static int
7363 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
7364 {
7365         int idx;
7366         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7367
7368         if (!(pf->flags & I40E_FLAG_VXLAN)) {
7369                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
7370                 return -EINVAL;
7371         }
7372
7373         idx = i40e_get_vxlan_port_idx(pf, port);
7374
7375         if (idx < 0) {
7376                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
7377                 return -EINVAL;
7378         }
7379
7380         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
7381                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
7382                 return -1;
7383         }
7384
7385         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
7386                         port, idx);
7387
7388         pf->vxlan_ports[idx] = 0;
7389         pf->vxlan_bitmap &= ~(1 << idx);
7390
7391         if (!pf->vxlan_bitmap)
7392                 pf->flags &= ~I40E_FLAG_VXLAN;
7393
7394         return 0;
7395 }
7396
7397 /* Add UDP tunneling port */
7398 static int
7399 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7400                              struct rte_eth_udp_tunnel *udp_tunnel)
7401 {
7402         int ret = 0;
7403         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7404
7405         if (udp_tunnel == NULL)
7406                 return -EINVAL;
7407
7408         switch (udp_tunnel->prot_type) {
7409         case RTE_TUNNEL_TYPE_VXLAN:
7410                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
7411                 break;
7412
7413         case RTE_TUNNEL_TYPE_GENEVE:
7414         case RTE_TUNNEL_TYPE_TEREDO:
7415                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7416                 ret = -1;
7417                 break;
7418
7419         default:
7420                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7421                 ret = -1;
7422                 break;
7423         }
7424
7425         return ret;
7426 }
7427
7428 /* Remove UDP tunneling port */
7429 static int
7430 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7431                              struct rte_eth_udp_tunnel *udp_tunnel)
7432 {
7433         int ret = 0;
7434         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7435
7436         if (udp_tunnel == NULL)
7437                 return -EINVAL;
7438
7439         switch (udp_tunnel->prot_type) {
7440         case RTE_TUNNEL_TYPE_VXLAN:
7441                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
7442                 break;
7443         case RTE_TUNNEL_TYPE_GENEVE:
7444         case RTE_TUNNEL_TYPE_TEREDO:
7445                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
7446                 ret = -1;
7447                 break;
7448         default:
7449                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
7450                 ret = -1;
7451                 break;
7452         }
7453
7454         return ret;
7455 }
7456
7457 /* Calculate the maximum number of contiguous PF queues that are configured */
7458 static int
7459 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
7460 {
7461         struct rte_eth_dev_data *data = pf->dev_data;
7462         int i, num;
7463         struct i40e_rx_queue *rxq;
7464
7465         num = 0;
7466         for (i = 0; i < pf->lan_nb_qps; i++) {
7467                 rxq = data->rx_queues[i];
7468                 if (rxq && rxq->q_set)
7469                         num++;
7470                 else
7471                         break;
7472         }
7473
7474         return num;
7475 }
7476
7477 /* Configure RSS */
7478 static int
7479 i40e_pf_config_rss(struct i40e_pf *pf)
7480 {
7481         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7482         struct rte_eth_rss_conf rss_conf;
7483         uint32_t i, lut = 0;
7484         uint16_t j, num;
7485
7486         /*
7487          * If both VMDQ and RSS are enabled, not all of the PF queues are configured.
7488          * It's necessary to calculate the number of PF queues that are actually configured.
7489          */
7490         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
7491                 num = i40e_pf_calc_configured_queues_num(pf);
7492         else
7493                 num = pf->dev_data->nb_rx_queues;
7494
7495         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
7496         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
7497                         num);
7498
7499         if (num == 0) {
7500                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
7501                 return -ENOTSUP;
7502         }
7503
7504         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
7505                 if (j == num)
7506                         j = 0;
7507                 lut = (lut << 8) | (j & ((0x1 <<
7508                         hw->func_caps.rss_table_entry_width) - 1));
7509                 if ((i & 3) == 3)
7510                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
7511         }
7512
7513         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
7514         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
7515                 i40e_pf_disable_rss(pf);
7516                 return 0;
7517         }
7518         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
7519                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
7520                 /* Random default keys */
7521                 static uint32_t rss_key_default[] = {0x6b793944,
7522                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
7523                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
7524                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
7525
7526                 rss_conf.rss_key = (uint8_t *)rss_key_default;
7527                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7528                                                         sizeof(uint32_t);
7529         }
7530
7531         return i40e_hw_rss_hash_set(pf, &rss_conf);
7532 }
7533
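/* Sanity check tunnel filter parameters: queue ID, inner VLAN and MAC addresses */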
7534 static int
7535 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
7536                                struct rte_eth_tunnel_filter_conf *filter)
7537 {
7538         if (pf == NULL || filter == NULL) {
7539                 PMD_DRV_LOG(ERR, "Invalid parameter");
7540                 return -EINVAL;
7541         }
7542
7543         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
7544                 PMD_DRV_LOG(ERR, "Invalid queue ID");
7545                 return -EINVAL;
7546         }
7547
7548         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
7549                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
7550                 return -EINVAL;
7551         }
7552
7553         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
7554                 (is_zero_ether_addr(&filter->outer_mac))) {
7555                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
7556                 return -EINVAL;
7557         }
7558
7559         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
7560                 (is_zero_ether_addr(&filter->inner_mac))) {
7561                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
7562                 return -EINVAL;
7563         }
7564
7565         return 0;
7566 }
7567
7568 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
7569 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
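/* Configure the global GRE key length (3 or 4 bytes) via the GL_PRS_FVBM register */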
7570 static int
7571 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
7572 {
7573         uint32_t val, reg;
7574         int ret = -EINVAL;
7575
7576         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
7577         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
7578
7579         if (len == 3) {
7580                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
7581         } else if (len == 4) {
7582                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
7583         } else {
7584                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
7585                 return ret;
7586         }
7587
7588         if (reg != val) {
7589                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
7590                                                    reg, NULL);
7591                 if (ret != 0)
7592                         return ret;
7593         } else {
7594                 ret = 0;
7595         }
7596         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
7597                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
7598
7599         return ret;
7600 }
7601
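/* Apply a global configuration item; only the GRE key length is currently supported */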
7602 static int
7603 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
7604 {
7605         int ret = -EINVAL;
7606
7607         if (!hw || !cfg)
7608                 return -EINVAL;
7609
7610         switch (cfg->cfg_type) {
7611         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
7612                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
7613                 break;
7614         default:
7615                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
7616                 break;
7617         }
7618
7619         return ret;
7620 }
7621
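/* Handle global filter configuration; only the RTE_ETH_FILTER_SET operation is supported */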
7622 static int
7623 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
7624                                enum rte_filter_op filter_op,
7625                                void *arg)
7626 {
7627         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7628         int ret = I40E_ERR_PARAM;
7629
7630         switch (filter_op) {
7631         case RTE_ETH_FILTER_SET:
7632                 ret = i40e_dev_global_config_set(hw,
7633                         (struct rte_eth_global_cfg *)arg);
7634                 break;
7635         default:
7636                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
7637                 break;
7638         }
7639
7640         return ret;
7641 }
7642
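/* Dispatch tunnel filter control operations: NOP, add and delete */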
7643 static int
7644 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
7645                           enum rte_filter_op filter_op,
7646                           void *arg)
7647 {
7648         struct rte_eth_tunnel_filter_conf *filter;
7649         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7650         int ret = I40E_SUCCESS;
7651
7652         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
7653
7654         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
7655                 return I40E_ERR_PARAM;
7656
7657         switch (filter_op) {
7658         case RTE_ETH_FILTER_NOP:
7659                 if (!(pf->flags & I40E_FLAG_VXLAN))
7660                         ret = I40E_NOT_SUPPORTED;
7661                 break;
7662         case RTE_ETH_FILTER_ADD:
7663                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
7664                 break;
7665         case RTE_ETH_FILTER_DELETE:
7666                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
7667                 break;
7668         default:
7669                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
7670                 ret = I40E_ERR_PARAM;
7671                 break;
7672         }
7673
7674         return ret;
7675 }
7676
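/* Configure multi-queue RX: enable RSS when requested by the RX mq_mode, otherwise disable it */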
7677 static int
7678 i40e_pf_config_mq_rx(struct i40e_pf *pf)
7679 {
7680         int ret = 0;
7681         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
7682
7683         /* RSS setup */
7684         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
7685                 ret = i40e_pf_config_rss(pf);
7686         else
7687                 i40e_pf_disable_rss(pf);
7688
7689         return ret;
7690 }
7691
7692 /* Get the symmetric hash enable configurations per port */
7693 static void
7694 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
7695 {
7696         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
7697
7698         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
7699 }
7700
7701 /* Set the symmetric hash enable configurations per port */
7702 static void
7703 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
7704 {
7705         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
7706
7707         if (enable > 0) {
7708                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
7709                         PMD_DRV_LOG(INFO,
7710                                 "Symmetric hash has already been enabled");
7711                         return;
7712                 }
7713                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
7714         } else {
7715                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
7716                         PMD_DRV_LOG(INFO,
7717                                 "Symmetric hash has already been disabled");
7718                         return;
7719                 }
7720                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
7721         }
7722         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
7723         I40E_WRITE_FLUSH(hw);
7724 }
7725
7726 /*
7727  * Get global configurations of hash function type and symmetric hash enable
7728  * per flow type (pctype). Note that global configuration means it affects all
7729  * the ports on the same NIC.
7730  */
7731 static int
7732 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
7733                                    struct rte_eth_hash_global_conf *g_cfg)
7734 {
7735         uint32_t reg, mask = I40E_FLOW_TYPES;
7736         uint16_t i;
7737         enum i40e_filter_pctype pctype;
7738
7739         memset(g_cfg, 0, sizeof(*g_cfg));
7740         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7741         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
7742                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
7743         else
7744                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
7745         PMD_DRV_LOG(DEBUG, "Hash function is %s",
7746                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
7747
7748         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
7749                 if (!(mask & (1UL << i)))
7750                         continue;
7751                 mask &= ~(1UL << i);
7752                 /* A set bit indicates the corresponding flow type is supported */
7753                 g_cfg->valid_bit_mask[0] |= (1UL << i);
7754                 /* if flowtype is invalid, continue */
7755                 if (!I40E_VALID_FLOW(i))
7756                         continue;
7757                 pctype = i40e_flowtype_to_pctype(i);
7758                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
7759                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
7760                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
7761         }
7762
7763         return 0;
7764 }
7765
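/* Check the global hash configuration for a supported hash function and a valid flow type mask */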
7766 static int
7767 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
7768 {
7769         uint32_t i;
7770         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
7771
7772         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
7773                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
7774                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
7775                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
7776                                                 g_cfg->hash_func);
7777                 return -EINVAL;
7778         }
7779
7780         /*
7781          * As i40e supports fewer than 32 flow types, only the first 32 bits need to
7782          * be checked.
7783          */
7784         mask0 = g_cfg->valid_bit_mask[0];
7785         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
7786                 if (i == 0) {
7787                         /* Check if any unsupported flow type configured */
7788                         if ((mask0 | i40e_mask) ^ i40e_mask)
7789                                 goto mask_err;
7790                 } else {
7791                         if (g_cfg->valid_bit_mask[i])
7792                                 goto mask_err;
7793                 }
7794         }
7795
7796         return 0;
7797
7798 mask_err:
7799         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
7800
7801         return -EINVAL;
7802 }
7803
7804 /*
7805  * Set global configurations of hash function type and symmetric hash enable
7806  * per flow type (pctype). Note any modifying global configuration will affect
7807  * all the ports on the same NIC.
7808  */
7809 static int
7810 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
7811                                    struct rte_eth_hash_global_conf *g_cfg)
7812 {
7813         int ret;
7814         uint16_t i;
7815         uint32_t reg;
7816         uint32_t mask0 = g_cfg->valid_bit_mask[0];
7817         enum i40e_filter_pctype pctype;
7818
7819         /* Check the input parameters */
7820         ret = i40e_hash_global_config_check(g_cfg);
7821         if (ret < 0)
7822                 return ret;
7823
7824         for (i = 0; mask0 && i < UINT32_BIT; i++) {
7825                 if (!(mask0 & (1UL << i)))
7826                         continue;
7827                 mask0 &= ~(1UL << i);
7828                 /* if flowtype is invalid, continue */
7829                 if (!I40E_VALID_FLOW(i))
7830                         continue;
7831                 pctype = i40e_flowtype_to_pctype(i);
7832                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
7833                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
7834                 if (hw->mac.type == I40E_MAC_X722) {
7835                         if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
7836                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7837                                   I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
7838                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7839                                   I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
7840                                   reg);
7841                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7842                                   I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
7843                                   reg);
7844                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
7845                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7846                                   I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
7847                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7848                                   I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
7849                                   reg);
7850                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
7851                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7852                                   I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
7853                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7854                                   I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
7855                                   reg);
7856                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7857                                   I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
7858                                   reg);
7859                         } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
7860                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7861                                   I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
7862                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
7863                                   I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
7864                                   reg);
7865                         } else {
7866                                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
7867                                   reg);
7868                         }
7869                 } else {
7870                         i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
7871                 }
7872         }
7873
7874         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7875         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
7876                 /* Toeplitz */
7877                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
7878                         PMD_DRV_LOG(DEBUG,
7879                                 "Hash function already set to Toeplitz");
7880                         goto out;
7881                 }
7882                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
7883         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
7884                 /* Simple XOR */
7885                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
7886                         PMD_DRV_LOG(DEBUG,
7887                                 "Hash function already set to Simple XOR");
7888                         goto out;
7889                 }
7890                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
7891         } else
7892                 /* Use the default, and keep it as it is */
7893                 goto out;
7894
7895         i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
7896
7897 out:
7898         I40E_WRITE_FLUSH(hw);
7899
7900         return 0;
7901 }
7902
7903 /**
7904  * Valid input sets for hash and flow director filters per PCTYPE
7905  */
7906 static uint64_t
7907 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
7908                 enum rte_filter_type filter)
7909 {
7910         uint64_t valid;
7911
7912         static const uint64_t valid_hash_inset_table[] = {
7913                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
7914                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7915                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7916                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
7917                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
7918                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7919                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7920                         I40E_INSET_FLEX_PAYLOAD,
7921                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7922                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7923                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7924                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7925                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7926                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7927                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7928                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7929                         I40E_INSET_FLEX_PAYLOAD,
7930                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7931                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7932                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7933                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7934                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7935                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7936                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7937                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7938                         I40E_INSET_FLEX_PAYLOAD,
7939                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7940                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7941                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7942                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7943                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7944                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7945                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7946                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7947                         I40E_INSET_FLEX_PAYLOAD,
7948                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7949                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7950                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7951                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7952                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7953                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7954                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7955                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7956                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7957                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7958                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7959                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7960                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7961                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7962                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7963                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7964                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7965                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7966                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7967                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7968                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7969                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7970                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7971                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7972                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7973                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7974                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
7975                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7976                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7977                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7978                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7979                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7980                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7981                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7982                         I40E_INSET_FLEX_PAYLOAD,
7983                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
7984                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7985                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7986                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7987                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7988                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
7989                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
7990                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
7991                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7992                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7993                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7994                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7995                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7996                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7997                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7998                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
7999                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8000                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8001                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8002                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8003                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8004                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8005                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8006                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8007                         I40E_INSET_FLEX_PAYLOAD,
8008                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8009                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8010                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8011                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8012                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8013                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8014                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8015                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8016                         I40E_INSET_FLEX_PAYLOAD,
8017                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8018                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8019                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8020                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8021                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8022                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8023                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8024                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8025                         I40E_INSET_FLEX_PAYLOAD,
8026                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8027                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8028                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8029                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8030                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8031                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8032                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8033                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8034                         I40E_INSET_FLEX_PAYLOAD,
8035                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8036                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8037                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8038                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8039                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8040                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8041                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8042                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8043                         I40E_INSET_FLEX_PAYLOAD,
8044                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8045                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8046                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8047                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8048                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8049                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8050                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8051                         I40E_INSET_FLEX_PAYLOAD,
8052                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8053                         I40E_INSET_DMAC | I40E_INSET_SMAC |
8054                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8055                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8056                         I40E_INSET_FLEX_PAYLOAD,
8057         };
8058
8059         /**
8060          * Flow director supports only fields defined in
8061          * union rte_eth_fdir_flow.
8062          */
8063         static const uint64_t valid_fdir_inset_table[] = {
8064                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8065                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8066                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8067                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8068                 I40E_INSET_IPV4_TTL,
8069                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8070                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8071                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8072                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8073                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8074                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8075                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8076                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8077                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8078                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8079                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8080                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8081                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8082                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8083                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8084                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8085                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8086                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8087                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8088                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8089                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8090                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8091                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8092                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8093                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8094                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8095                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8096                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8097                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8098                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8099                 I40E_INSET_SCTP_VT,
8100                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8101                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8102                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8103                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8104                 I40E_INSET_IPV4_TTL,
8105                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8106                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8107                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8108                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8109                 I40E_INSET_IPV6_HOP_LIMIT,
8110                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8111                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8112                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8113                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8114                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8115                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8116                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8117                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8118                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8119                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8120                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8121                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8122                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8123                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8124                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8125                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8126                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8127                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8128                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8129                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8130                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8131                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8132                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8133                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8134                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8135                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8136                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8137                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8138                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8139                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8140                 I40E_INSET_SCTP_VT,
8141                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8142                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8143                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8144                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8145                 I40E_INSET_IPV6_HOP_LIMIT,
8146                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8147                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8148                 I40E_INSET_LAST_ETHER_TYPE,
8149         };
8150
8151         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8152                 return 0;
8153         if (filter == RTE_ETH_FILTER_HASH)
8154                 valid = valid_hash_inset_table[pctype];
8155         else
8156                 valid = valid_fdir_inset_table[pctype];
8157
8158         return valid;
8159 }
8160
8161 /**
8162  * Validate if the input set is allowed for a specific PCTYPE
8163  */
8164 int
8165 i40e_validate_input_set(enum i40e_filter_pctype pctype,
8166                 enum rte_filter_type filter, uint64_t inset)
8167 {
8168         uint64_t valid;
8169
8170         valid = i40e_get_valid_input_set(pctype, filter);
8171         if (inset & (~valid))
8172                 return -EINVAL;
8173
8174         return 0;
8175 }
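
/*
 * Illustrative sketch (not part of the driver): the tables above make TCP
 * flags a valid hash field but not a valid flow director field for IPv4/TCP,
 * so a caller can reject such a request up front.  The field choice below is
 * hypothetical.
 *
 *      uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_TCP_FLAGS;
 *
 *      if (i40e_validate_input_set(I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
 *                                  RTE_ETH_FILTER_FDIR, inset) != 0)
 *              return -EINVAL;
 *
 * The same inset passes with RTE_ETH_FILTER_HASH, whose table includes
 * I40E_INSET_TCP_FLAGS for this PCTYPE.
 */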
8176
8177 /* Default input set field combination per pctype */
8178 uint64_t
8179 i40e_get_default_input_set(uint16_t pctype)
8180 {
8181         static const uint64_t default_inset_table[] = {
8182                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8183                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8184                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8185                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8186                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8187                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8188                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8189                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8190                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8191                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8192                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8193                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8194                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8195                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8196                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8197                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8198                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8199                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8200                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8201                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8202                         I40E_INSET_SCTP_VT,
8203                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8204                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
8205                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8206                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8207                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8208                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8209                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8210                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8211                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8212                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8213                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8214                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8215                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8216                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8217                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8218                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8219                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8220                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8221                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8222                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8223                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8224                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8225                         I40E_INSET_SCTP_VT,
8226                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8227                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
8228                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8229                         I40E_INSET_LAST_ETHER_TYPE,
8230         };
8231
8232         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
8233                 return 0;
8234
8235         return default_inset_table[pctype];
8236 }
8237
8238 /**
8239  * Parse the input set field indexes into logical bit masks
8240  */
8241 static int
8242 i40e_parse_input_set(uint64_t *inset,
8243                      enum i40e_filter_pctype pctype,
8244                      enum rte_eth_input_set_field *field,
8245                      uint16_t size)
8246 {
8247         uint16_t i, j;
8248         int ret = -EINVAL;
8249
8250         static const struct {
8251                 enum rte_eth_input_set_field field;
8252                 uint64_t inset;
8253         } inset_convert_table[] = {
8254                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
8255                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
8256                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
8257                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
8258                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
8259                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
8260                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
8261                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
8262                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
8263                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
8264                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
8265                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
8266                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
8267                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
8268                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
8269                         I40E_INSET_IPV6_NEXT_HDR},
8270                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
8271                         I40E_INSET_IPV6_HOP_LIMIT},
8272                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
8273                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
8274                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
8275                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
8276                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
8277                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
8278                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
8279                         I40E_INSET_SCTP_VT},
8280                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
8281                         I40E_INSET_TUNNEL_DMAC},
8282                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
8283                         I40E_INSET_VLAN_TUNNEL},
8284                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
8285                         I40E_INSET_TUNNEL_ID},
8286                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
8287                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
8288                         I40E_INSET_FLEX_PAYLOAD_W1},
8289                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
8290                         I40E_INSET_FLEX_PAYLOAD_W2},
8291                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
8292                         I40E_INSET_FLEX_PAYLOAD_W3},
8293                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
8294                         I40E_INSET_FLEX_PAYLOAD_W4},
8295                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
8296                         I40E_INSET_FLEX_PAYLOAD_W5},
8297                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
8298                         I40E_INSET_FLEX_PAYLOAD_W6},
8299                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
8300                         I40E_INSET_FLEX_PAYLOAD_W7},
8301                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
8302                         I40E_INSET_FLEX_PAYLOAD_W8},
8303         };
8304
8305         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
8306                 return ret;
8307
8308         /* DEFAULT and NONE are only allowed as a single item */
8309         if (size == 1) {
8310                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
8311                         *inset = i40e_get_default_input_set(pctype);
8312                         return 0;
8313                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
8314                         *inset = I40E_INSET_NONE;
8315                         return 0;
8316                 }
8317         }
8318
8319         for (i = 0, *inset = 0; i < size; i++) {
8320                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
8321                         if (field[i] == inset_convert_table[j].field) {
8322                                 *inset |= inset_convert_table[j].inset;
8323                                 break;
8324                         }
8325                 }
8326
8327                 /* It contains unsupported input set, return immediately */
8328                 if (j == RTE_DIM(inset_convert_table))
8329                         return ret;
8330         }
8331
8332         return 0;
8333 }
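
/*
 * Illustrative sketch (not part of the driver): with the conversion table
 * above, a two-field list of UDP source and destination port parses into the
 * I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT logical mask.  The field values
 * below are hypothetical.
 *
 *      enum rte_eth_input_set_field fields[] = {
 *              RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT,
 *              RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
 *      };
 *      uint64_t inset = 0;
 *
 *      if (i40e_parse_input_set(&inset, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *                               fields, RTE_DIM(fields)) == 0)
 *              assert(inset == (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT));
 */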
8334
8335 /**
8336  * Translate the input set from logical bit masks to register-aware
8337  * bit masks
8338  */
8339 uint64_t
8340 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
8341 {
8342         uint64_t val = 0;
8343         uint16_t i;
8344
8345         struct inset_map {
8346                 uint64_t inset;
8347                 uint64_t inset_reg;
8348         };
8349
8350         static const struct inset_map inset_map_common[] = {
8351                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
8352                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
8353                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
8354                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
8355                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
8356                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
8357                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
8358                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
8359                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
8360                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
8361                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
8362                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
8363                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
8364                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
8365                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
8366                 {I40E_INSET_TUNNEL_DMAC,
8367                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
8368                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
8369                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
8370                 {I40E_INSET_TUNNEL_SRC_PORT,
8371                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
8372                 {I40E_INSET_TUNNEL_DST_PORT,
8373                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
8374                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
8375                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
8376                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
8377                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
8378                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
8379                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
8380                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
8381                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
8382                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
8383         };
8384
8385         /* Some registers are mapped differently on the X722 */
8386         static const struct inset_map inset_map_diff_x722[] = {
8387                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
8388                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
8389                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
8390                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
8391         };
8392
8393         static const struct inset_map inset_map_diff_not_x722[] = {
8394                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
8395                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
8396                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
8397                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
8398         };
8399
8400         if (input == 0)
8401                 return val;
8402
8403         /* Translate input set to register aware inset */
8404         if (type == I40E_MAC_X722) {
8405                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
8406                         if (input & inset_map_diff_x722[i].inset)
8407                                 val |= inset_map_diff_x722[i].inset_reg;
8408                 }
8409         } else {
8410                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
8411                         if (input & inset_map_diff_not_x722[i].inset)
8412                                 val |= inset_map_diff_not_x722[i].inset_reg;
8413                 }
8414         }
8415
8416         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
8417                 if (input & inset_map_common[i].inset)
8418                         val |= inset_map_common[i].inset_reg;
8419         }
8420
8421         return val;
8422 }
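
/*
 * The 64-bit value returned above is already laid out for the hardware:
 * callers such as i40e_filter_input_set_init() below split it across the low
 * and high 32-bit halves of the INSET registers, roughly (local names here
 * are illustrative only):
 *
 *      low  = (uint32_t)(inset_reg & UINT32_MAX);
 *      high = (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX);
 */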
8423
8424 int
8425 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
8426 {
8427         uint8_t i, idx = 0;
8428         uint64_t inset_need_mask = inset;
8429
8430         static const struct {
8431                 uint64_t inset;
8432                 uint32_t mask;
8433         } inset_mask_map[] = {
8434                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
8435                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
8436                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
8437                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
8438                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
8439                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
8440                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
8441                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
8442         };
8443
8444         if (!inset || !mask || !nb_elem)
8445                 return 0;
8446
8447         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8448                 /* Clear the inset bit if no mask register is required,
8449                  * for example proto + ttl.
8450                  */
8451                 if ((inset & inset_mask_map[i].inset) ==
8452                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
8453                         inset_need_mask &= ~inset_mask_map[i].inset;
8454                 if (!inset_need_mask)
8455                         return 0;
8456         }
8457         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
8458                 if ((inset_need_mask & inset_mask_map[i].inset) ==
8459                     inset_mask_map[i].inset) {
8460                         if (idx >= nb_elem) {
8461                                 PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
8462                                 return -EINVAL;
8463                         }
8464                         mask[idx] = inset_mask_map[i].mask;
8465                         idx++;
8466                 }
8467         }
8468
8469         return idx;
8470 }
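
/*
 * Illustrative behaviour of the function above (not part of the driver): an
 * input set carrying both IPV4_PROTO and IPV4_TTL hits the zero-mask entry in
 * the table, needs no field mask and returns 0, while a TOS-only input set
 * produces exactly one mask register value.
 *
 *      uint32_t mask[I40E_INSET_MASK_NUM_REG] = {0};
 *      int num;
 *
 *      num = i40e_generate_inset_mask_reg(I40E_INSET_IPV4_TOS, mask,
 *                                         I40E_INSET_MASK_NUM_REG);
 *      (expected: num == 1 and mask[0] == I40E_INSET_IPV4_TOS_MASK)
 */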
8471
8472 void
8473 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
8474 {
8475         uint32_t reg = i40e_read_rx_ctl(hw, addr);
8476
8477         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
8478         if (reg != val)
8479                 i40e_write_rx_ctl(hw, addr, val);
8480         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
8481                     (uint32_t)i40e_read_rx_ctl(hw, addr));
8482 }
8483
8484 static void
8485 i40e_filter_input_set_init(struct i40e_pf *pf)
8486 {
8487         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8488         enum i40e_filter_pctype pctype;
8489         uint64_t input_set, inset_reg;
8490         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8491         int num, i;
8492
8493         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
8494              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
8495                 if (hw->mac.type == I40E_MAC_X722) {
8496                         if (!I40E_VALID_PCTYPE_X722(pctype))
8497                                 continue;
8498                 } else {
8499                         if (!I40E_VALID_PCTYPE(pctype))
8500                                 continue;
8501                 }
8502
8503                 input_set = i40e_get_default_input_set(pctype);
8504
8505                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8506                                                    I40E_INSET_MASK_NUM_REG);
8507                 if (num < 0)
8508                         return;
8509                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
8510                                         input_set);
8511
8512                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8513                                       (uint32_t)(inset_reg & UINT32_MAX));
8514                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8515                                      (uint32_t)((inset_reg >>
8516                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8517                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8518                                       (uint32_t)(inset_reg & UINT32_MAX));
8519                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8520                                      (uint32_t)((inset_reg >>
8521                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
8522
8523                 for (i = 0; i < num; i++) {
8524                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8525                                              mask_reg[i]);
8526                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8527                                              mask_reg[i]);
8528                 }
8529                 /* Clear unused mask registers of the pctype */
8530                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
8531                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8532                                              0);
8533                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8534                                              0);
8535                 }
8536                 I40E_WRITE_FLUSH(hw);
8537
8538                 /* store the default input set */
8539                 pf->hash_input_set[pctype] = input_set;
8540                 pf->fdir.input_set[pctype] = input_set;
8541         }
8542 }
8543
8544 int
8545 i40e_hash_filter_inset_select(struct i40e_hw *hw,
8546                          struct rte_eth_input_set_conf *conf)
8547 {
8548         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8549         enum i40e_filter_pctype pctype;
8550         uint64_t input_set, inset_reg = 0;
8551         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8552         int ret, i, num;
8553
8554         if (!conf) {
8555                 PMD_DRV_LOG(ERR, "Invalid pointer");
8556                 return -EFAULT;
8557         }
8558         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8559             conf->op != RTE_ETH_INPUT_SET_ADD) {
8560                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8561                 return -EINVAL;
8562         }
8563
8564         if (!I40E_VALID_FLOW(conf->flow_type)) {
8565                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8566                 return -EINVAL;
8567         }
8568
8569         if (hw->mac.type == I40E_MAC_X722) {
8570                 /* get translated pctype value in fd pctype register */
8571                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
8572                         I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
8573                         conf->flow_type)));
8574         } else
8575                 pctype = i40e_flowtype_to_pctype(conf->flow_type);
8576
8577         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8578                                    conf->inset_size);
8579         if (ret) {
8580                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8581                 return -EINVAL;
8582         }
8583         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH,
8584                                     input_set) != 0) {
8585                 PMD_DRV_LOG(ERR, "Invalid input set");
8586                 return -EINVAL;
8587         }
8588         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
8589                 /* get inset value in register */
8590                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
8591                 inset_reg <<= I40E_32_BIT_WIDTH;
8592                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
8593                 input_set |= pf->hash_input_set[pctype];
8594         }
8595         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8596                                            I40E_INSET_MASK_NUM_REG);
8597         if (num < 0)
8598                 return -EINVAL;
8599
8600         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8601
8602         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
8603                               (uint32_t)(inset_reg & UINT32_MAX));
8604         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
8605                              (uint32_t)((inset_reg >>
8606                              I40E_32_BIT_WIDTH) & UINT32_MAX));
8607
8608         for (i = 0; i < num; i++)
8609                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8610                                      mask_reg[i]);
8611         /* Clear unused mask registers of the pctype */
8612         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
8613                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
8614                                      0);
8615         I40E_WRITE_FLUSH(hw);
8616
8617         pf->hash_input_set[pctype] = input_set;
8618         return 0;
8619 }
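
/*
 * Illustrative usage sketch (not part of the driver): selecting a
 * source/destination IPv4 hash input set.  The configuration is hypothetical;
 * it normally reaches this function through the
 * RTE_ETH_HASH_FILTER_INPUT_SET_SELECT case of i40e_hash_filter_set() below.
 *
 *      struct rte_eth_input_set_conf conf = {
 *              .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *              .inset_size = 2,
 *              .field = { RTE_ETH_INPUT_SET_L3_SRC_IP4,
 *                         RTE_ETH_INPUT_SET_L3_DST_IP4 },
 *              .op = RTE_ETH_INPUT_SET_SELECT,
 *      };
 *
 *      ret = i40e_hash_filter_inset_select(hw, &conf);
 */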
8620
8621 int
8622 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
8623                          struct rte_eth_input_set_conf *conf)
8624 {
8625         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8626         enum i40e_filter_pctype pctype;
8627         uint64_t input_set, inset_reg = 0;
8628         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
8629         int ret, i, num;
8630
8631         if (!hw || !conf) {
8632                 PMD_DRV_LOG(ERR, "Invalid pointer");
8633                 return -EFAULT;
8634         }
8635         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
8636             conf->op != RTE_ETH_INPUT_SET_ADD) {
8637                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
8638                 return -EINVAL;
8639         }
8640
8641         if (!I40E_VALID_FLOW(conf->flow_type)) {
8642                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
8643                 return -EINVAL;
8644         }
8645
8646         pctype = i40e_flowtype_to_pctype(conf->flow_type);
8647
8648         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
8649                                    conf->inset_size);
8650         if (ret) {
8651                 PMD_DRV_LOG(ERR, "Failed to parse input set");
8652                 return -EINVAL;
8653         }
8654         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
8655                                     input_set) != 0) {
8656                 PMD_DRV_LOG(ERR, "Invalid input set");
8657                 return -EINVAL;
8658         }
8659
8660         /* get inset value in register */
8661         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
8662         inset_reg <<= I40E_32_BIT_WIDTH;
8663         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
8664
8665         /* Cannot change the inset reg for flex payload for fdir;
8666          * it is done by writing I40E_PRTQF_FD_FLXINSET
8667          * in i40e_set_flex_mask_on_pctype.
8668          */
8669         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
8670                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
8671         else
8672                 input_set |= pf->fdir.input_set[pctype];
8673         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
8674                                            I40E_INSET_MASK_NUM_REG);
8675         if (num < 0)
8676                 return -EINVAL;
8677
8678         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
8679
8680         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
8681                               (uint32_t)(inset_reg & UINT32_MAX));
8682         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
8683                              (uint32_t)((inset_reg >>
8684                              I40E_32_BIT_WIDTH) & UINT32_MAX));
8685
8686         for (i = 0; i < num; i++)
8687                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8688                                      mask_reg[i]);
8689         /* Clear unused mask registers of the pctype */
8690         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
8691                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
8692                                      0);
8693         I40E_WRITE_FLUSH(hw);
8694
8695         pf->fdir.input_set[pctype] = input_set;
8696         return 0;
8697 }
8698
8699 static int
8700 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
8701 {
8702         int ret = 0;
8703
8704         if (!hw || !info) {
8705                 PMD_DRV_LOG(ERR, "Invalid pointer");
8706                 return -EFAULT;
8707         }
8708
8709         switch (info->info_type) {
8710         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
8711                 i40e_get_symmetric_hash_enable_per_port(hw,
8712                                         &(info->info.enable));
8713                 break;
8714         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
8715                 ret = i40e_get_hash_filter_global_config(hw,
8716                                 &(info->info.global_conf));
8717                 break;
8718         default:
8719                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
8720                                                         info->info_type);
8721                 ret = -EINVAL;
8722                 break;
8723         }
8724
8725         return ret;
8726 }
8727
8728 static int
8729 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
8730 {
8731         int ret = 0;
8732
8733         if (!hw || !info) {
8734                 PMD_DRV_LOG(ERR, "Invalid pointer");
8735                 return -EFAULT;
8736         }
8737
8738         switch (info->info_type) {
8739         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
8740                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
8741                 break;
8742         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
8743                 ret = i40e_set_hash_filter_global_config(hw,
8744                                 &(info->info.global_conf));
8745                 break;
8746         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
8747                 ret = i40e_hash_filter_inset_select(hw,
8748                                                &(info->info.input_set_conf));
8749                 break;
8750
8751         default:
8752                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
8753                                                         info->info_type);
8754                 ret = -EINVAL;
8755                 break;
8756         }
8757
8758         return ret;
8759 }
8760
8761 /* Operations for hash function */
8762 static int
8763 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
8764                       enum rte_filter_op filter_op,
8765                       void *arg)
8766 {
8767         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8768         int ret = 0;
8769
8770         switch (filter_op) {
8771         case RTE_ETH_FILTER_NOP:
8772                 break;
8773         case RTE_ETH_FILTER_GET:
8774                 ret = i40e_hash_filter_get(hw,
8775                         (struct rte_eth_hash_filter_info *)arg);
8776                 break;
8777         case RTE_ETH_FILTER_SET:
8778                 ret = i40e_hash_filter_set(hw,
8779                         (struct rte_eth_hash_filter_info *)arg);
8780                 break;
8781         default:
8782                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
8783                                                                 filter_op);
8784                 ret = -ENOTSUP;
8785                 break;
8786         }
8787
8788         return ret;
8789 }
8790
8791 /* Convert ethertype filter structure */
8792 static int
8793 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
8794                               struct i40e_ethertype_filter *filter)
8795 {
8796         rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
8797         filter->input.ether_type = input->ether_type;
8798         filter->flags = input->flags;
8799         filter->queue = input->queue;
8800
8801         return 0;
8802 }
8803
8804 /* Check if the ethertype filter already exists */
8805 struct i40e_ethertype_filter *
8806 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
8807                                 const struct i40e_ethertype_filter_input *input)
8808 {
8809         int ret;
8810
8811         ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
8812         if (ret < 0)
8813                 return NULL;
8814
8815         return ethertype_rule->hash_map[ret];
8816 }
8817
8818 /* Add an ethertype filter to the SW list */
8819 static int
8820 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
8821                                 struct i40e_ethertype_filter *filter)
8822 {
8823         struct i40e_ethertype_rule *rule = &pf->ethertype;
8824         int ret;
8825
8826         ret = rte_hash_add_key(rule->hash_table, &filter->input);
8827         if (ret < 0) {
8828                 PMD_DRV_LOG(ERR,
8829                             "Failed to insert ethertype filter"
8830                             " into hash table %d!",
8831                             ret);
8832                 return ret;
8833         }
8834         rule->hash_map[ret] = filter;
8835
8836         TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
8837
8838         return 0;
8839 }
8840
8841 /* Delete an ethertype filter from the SW list */
8842 int
8843 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
8844                              struct i40e_ethertype_filter_input *input)
8845 {
8846         struct i40e_ethertype_rule *rule = &pf->ethertype;
8847         struct i40e_ethertype_filter *filter;
8848         int ret;
8849
8850         ret = rte_hash_del_key(rule->hash_table, input);
8851         if (ret < 0) {
8852                 PMD_DRV_LOG(ERR,
8853                             "Failed to delete ethertype filter"
8854                             " from hash table %d!",
8855                             ret);
8856                 return ret;
8857         }
8858         filter = rule->hash_map[ret];
8859         rule->hash_map[ret] = NULL;
8860
8861         TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
8862         rte_free(filter);
8863
8864         return 0;
8865 }
8866
8867 /*
8868  * Configure an ethertype filter, which can direct packets by filtering
8869  * on MAC address and ether_type, or on ether_type only
8870  */
8871 int
8872 i40e_ethertype_filter_set(struct i40e_pf *pf,
8873                         struct rte_eth_ethertype_filter *filter,
8874                         bool add)
8875 {
8876         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8877         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
8878         struct i40e_ethertype_filter *ethertype_filter, *node;
8879         struct i40e_ethertype_filter check_filter;
8880         struct i40e_control_filter_stats stats;
8881         uint16_t flags = 0;
8882         int ret;
8883
8884         if (filter->queue >= pf->dev_data->nb_rx_queues) {
8885                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8886                 return -EINVAL;
8887         }
8888         if (filter->ether_type == ETHER_TYPE_IPv4 ||
8889                 filter->ether_type == ETHER_TYPE_IPv6) {
8890                 PMD_DRV_LOG(ERR,
8891                         "unsupported ether_type(0x%04x) in control packet filter.",
8892                         filter->ether_type);
8893                 return -EINVAL;
8894         }
8895         if (filter->ether_type == ETHER_TYPE_VLAN)
8896                 PMD_DRV_LOG(WARNING,
8897                         "filter vlan ether_type in first tag is not supported.");
8898
8899         /* Check if the filter already exists in the SW list */
8900         memset(&check_filter, 0, sizeof(check_filter));
8901         i40e_ethertype_filter_convert(filter, &check_filter);
8902         node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
8903                                                &check_filter.input);
8904         if (add && node) {
8905                 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
8906                 return -EINVAL;
8907         }
8908
8909         if (!add && !node) {
8910                 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
8911                 return -EINVAL;
8912         }
8913
8914         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
8915                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
8916         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
8917                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
8918         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
8919
8920         memset(&stats, 0, sizeof(stats));
8921         ret = i40e_aq_add_rem_control_packet_filter(hw,
8922                         filter->mac_addr.addr_bytes,
8923                         filter->ether_type, flags,
8924                         pf->main_vsi->seid,
8925                         filter->queue, add, &stats, NULL);
8926
8927         PMD_DRV_LOG(INFO,
8928                 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
8929                 ret, stats.mac_etype_used, stats.etype_used,
8930                 stats.mac_etype_free, stats.etype_free);
8931         if (ret < 0)
8932                 return -ENOSYS;
8933
8934         /* Add or delete a filter in SW list */
8935         if (add) {
8936                 ethertype_filter = rte_zmalloc("ethertype_filter",
8937                                        sizeof(*ethertype_filter), 0);
8938                 rte_memcpy(ethertype_filter, &check_filter,
8939                            sizeof(check_filter));
8940                 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
8941         } else {
8942                 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
8943         }
8944
8945         return ret;
8946 }
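
/*
 * Illustrative usage sketch (not part of the driver): dropping all packets of
 * one ether_type value, regardless of MAC address.  The ether_type below
 * (0x88B5) is a hypothetical example; such a filter normally arrives via
 * i40e_ethertype_filter_handle() below.
 *
 *      struct rte_eth_ethertype_filter fltr = {
 *              .ether_type = 0x88B5,
 *              .flags = RTE_ETHTYPE_FLAGS_DROP,
 *              .queue = 0,
 *      };
 *
 *      ret = i40e_ethertype_filter_set(pf, &fltr, TRUE);
 */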
8947
8948 /*
8949  * Handle operations for ethertype filter.
8950  */
8951 static int
8952 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
8953                                 enum rte_filter_op filter_op,
8954                                 void *arg)
8955 {
8956         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8957         int ret = 0;
8958
8959         if (filter_op == RTE_ETH_FILTER_NOP)
8960                 return ret;
8961
8962         if (arg == NULL) {
8963                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
8964                             filter_op);
8965                 return -EINVAL;
8966         }
8967
8968         switch (filter_op) {
8969         case RTE_ETH_FILTER_ADD:
8970                 ret = i40e_ethertype_filter_set(pf,
8971                         (struct rte_eth_ethertype_filter *)arg,
8972                         TRUE);
8973                 break;
8974         case RTE_ETH_FILTER_DELETE:
8975                 ret = i40e_ethertype_filter_set(pf,
8976                         (struct rte_eth_ethertype_filter *)arg,
8977                         FALSE);
8978                 break;
8979         default:
8980                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
8981                 ret = -ENOSYS;
8982                 break;
8983         }
8984         return ret;
8985 }
8986
8987 static int
8988 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
8989                      enum rte_filter_type filter_type,
8990                      enum rte_filter_op filter_op,
8991                      void *arg)
8992 {
8993         int ret = 0;
8994
8995         if (dev == NULL)
8996                 return -EINVAL;
8997
8998         switch (filter_type) {
8999         case RTE_ETH_FILTER_NONE:
9000                 /* For global configuration */
9001                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9002                 break;
9003         case RTE_ETH_FILTER_HASH:
9004                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9005                 break;
9006         case RTE_ETH_FILTER_MACVLAN:
9007                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9008                 break;
9009         case RTE_ETH_FILTER_ETHERTYPE:
9010                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9011                 break;
9012         case RTE_ETH_FILTER_TUNNEL:
9013                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9014                 break;
9015         case RTE_ETH_FILTER_FDIR:
9016                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9017                 break;
9018         case RTE_ETH_FILTER_GENERIC:
9019                 if (filter_op != RTE_ETH_FILTER_GET)
9020                         return -EINVAL;
9021                 *(const void **)arg = &i40e_flow_ops;
9022                 break;
9023         default:
9024                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9025                                                         filter_type);
9026                 ret = -EINVAL;
9027                 break;
9028         }
9029
9030         return ret;
9031 }
9032
9033 /*
9034  * Check and enable Extended Tag.
9035  * Enabling Extended Tag is important for 40G performance.
9036  */
9037 static void
9038 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9039 {
9040         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9041         uint32_t buf = 0;
9042         int ret;
9043
9044         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9045                                       PCI_DEV_CAP_REG);
9046         if (ret < 0) {
9047                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9048                             PCI_DEV_CAP_REG);
9049                 return;
9050         }
9051         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9052                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9053                 return;
9054         }
9055
9056         buf = 0;
9057         ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9058                                       PCI_DEV_CTRL_REG);
9059         if (ret < 0) {
9060                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9061                             PCI_DEV_CTRL_REG);
9062                 return;
9063         }
9064         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9065                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9066                 return;
9067         }
9068         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9069         ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9070                                        PCI_DEV_CTRL_REG);
9071         if (ret < 0) {
9072                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9073                             PCI_DEV_CTRL_REG);
9074                 return;
9075         }
9076 }
9077
9078 /*
9079  * As some registers are only reset by a global hardware reset,
9080  * hardware initialization is needed to put those registers into an
9081  * expected initial state.
9082  */
9083 static void
9084 i40e_hw_init(struct rte_eth_dev *dev)
9085 {
9086         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9087
9088         i40e_enable_extended_tag(dev);
9089
9090         /* clear the PF Queue Filter control register */
9091         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9092
9093         /* Disable symmetric hash per port */
9094         i40e_set_symmetric_hash_enable_per_port(hw, 0);
9095 }
9096
9097 enum i40e_filter_pctype
9098 i40e_flowtype_to_pctype(uint16_t flow_type)
9099 {
9100         static const enum i40e_filter_pctype pctype_table[] = {
9101                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
9102                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
9103                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9104                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
9105                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9106                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
9107                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9108                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
9109                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9110                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
9111                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
9112                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9113                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
9114                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9115                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
9116                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9117                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
9118                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9119                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
9120         };
9121
9122         return pctype_table[flow_type];
9123 }
9124
9125 uint16_t
9126 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
9127 {
9128         static const uint16_t flowtype_table[] = {
9129                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
9130                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9131                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
9132                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9133                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
9134                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9135                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
9136                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9137                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
9138                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9139                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
9140                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9141                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
9142                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9143                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
9144                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
9145                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9146                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
9147                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9148                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
9149                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9150                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
9151                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9152                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
9153                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9154                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
9155                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9156                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
9157                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9158                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
9159                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
9160         };
9161
9162         return flowtype_table[pctype];
9163 }
9164
9165 /*
9166  * On X710, the observed performance falls far short of expectations on recent
9167  * firmware versions. On XL710, the same is true when promiscuous mode is
9168  * disabled, or when promiscuous mode is enabled and the port MAC address
9169  * equals the packet destination MAC address. The fix for this issue may not
9170  * be integrated in the following firmware version, so a workaround in the
9171  * software driver is needed. It modifies the initial values of 3 internal
9172  * only registers for both X710 and XL710. Note that the values for X710 and
9173  * XL710 could be different, and the workaround can be removed when the issue
9174  * is fixed in firmware in the future.
9175  */
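/* The overrides described above are applied in i40e_configure_registers()
 * below, which reads and rewrites these registers through the admin queue
 * debug register interface (i40e_aq_debug_read_register() /
 * i40e_aq_debug_write_register()), using the per-device values defined next.
 */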
9176
9177 /* For both X710 and XL710 */
9178 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
9179 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
9180
9181 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9182 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9183
9184 /* For X722 */
9185 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9186 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9187
9188 /* For X710 */
9189 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9190 /* For XL710 */
9191 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9192 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
9193
9194 static int
9195 i40e_dev_sync_phy_type(struct i40e_hw *hw)
9196 {
9197         enum i40e_status_code status;
9198         struct i40e_aq_get_phy_abilities_resp phy_ab;
9199         int ret = -ENOTSUP;
9200
9201         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
9202                                               NULL);
9203
9204         if (status) {
9205                 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
9206                         status);
9207                 return ret;
9208         }
9209
9210         return 0;
9211 }
9212
9213 static void
9214 i40e_configure_registers(struct i40e_hw *hw)
9215 {
9216         static struct {
9217                 uint32_t addr;
9218                 uint64_t val;
9219         } reg_table[] = {
9220                 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
9221                 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
9222                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
9223         };
9224         uint64_t reg;
9225         uint32_t i;
9226         int ret;
9227
9228         for (i = 0; i < RTE_DIM(reg_table); i++) {
9229                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
9230                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9231                                 reg_table[i].val =
9232                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9233                         else /* For X710/XL710/XXV710 */
9234                                 reg_table[i].val =
9235                                         I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
9236                 }
9237
9238                 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
9239                         if (hw->mac.type == I40E_MAC_X722) /* For X722 */
9240                                 reg_table[i].val =
9241                                         I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9242                         else /* For X710/XL710/XXV710 */
9243                                 reg_table[i].val =
9244                                         I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
9245                 }
9246
9247                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
9248                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
9249                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
9250                                 reg_table[i].val =
9251                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
9252                         else /* For X710 */
9253                                 reg_table[i].val =
9254                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
9255                 }
9256
9257                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
9258                                                         &reg, NULL);
9259                 if (ret < 0) {
9260                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
9261                                                         reg_table[i].addr);
9262                         break;
9263                 }
9264                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
9265                                                 reg_table[i].addr, reg);
9266                 if (reg == reg_table[i].val)
9267                         continue;
9268
9269                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
9270                                                 reg_table[i].val, NULL);
9271                 if (ret < 0) {
9272                         PMD_DRV_LOG(ERR,
9273                                 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
9274                                 reg_table[i].val, reg_table[i].addr);
9275                         break;
9276                 }
9277                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
9278                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
9279         }
9280 }
9281
9282 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
9283 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
9284 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
9285 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
9286 static int
9287 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
9288 {
9289         uint32_t reg;
9290         int ret;
9291
9292         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
9293                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
9294                 return -EINVAL;
9295         }
9296
9297         /* Configure for double VLAN RX stripping */
9298         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
9299         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
9300                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
9301                 ret = i40e_aq_debug_write_register(hw,
9302                                                    I40E_VSI_TSR(vsi->vsi_id),
9303                                                    reg, NULL);
9304                 if (ret < 0) {
9305                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
9306                                     vsi->vsi_id);
9307                         return I40E_ERR_CONFIG;
9308                 }
9309         }
9310
9311         /* Configure for double VLAN TX insertion */
9312         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
9313         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
9314                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
9315                 ret = i40e_aq_debug_write_register(hw,
9316                                                    I40E_VSI_L2TAGSTXVALID(
9317                                                    vsi->vsi_id), reg, NULL);
9318                 if (ret < 0) {
9319                         PMD_DRV_LOG(ERR,
9320                                 "Failed to update VSI_L2TAGSTXVALID[%d]",
9321                                 vsi->vsi_id);
9322                         return I40E_ERR_CONFIG;
9323                 }
9324         }
9325
9326         return 0;
9327 }
9328
9329 /**
9330  * i40e_aq_add_mirror_rule
9331  * @hw: pointer to the hardware structure
9332  * @seid: VEB seid to add mirror rule to
9333  * @dst_id: destination vsi seid
9334  * @entries: Buffer which contains the entities to be mirrored
9335  * @count: number of entities contained in the buffer
9336  * @rule_id: the rule ID of the rule to be added
9337  *
9338  * Add a mirror rule for a given veb.
9339  *
9340  **/
9341 static enum i40e_status_code
9342 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
9343                         uint16_t seid, uint16_t dst_id,
9344                         uint16_t rule_type, uint16_t *entries,
9345                         uint16_t count, uint16_t *rule_id)
9346 {
9347         struct i40e_aq_desc desc;
9348         struct i40e_aqc_add_delete_mirror_rule cmd;
9349         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
9350                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
9351                 &desc.params.raw;
9352         uint16_t buff_len;
9353         enum i40e_status_code status;
9354
9355         i40e_fill_default_direct_cmd_desc(&desc,
9356                                           i40e_aqc_opc_add_mirror_rule);
9357         memset(&cmd, 0, sizeof(cmd));
9358
9359         buff_len = sizeof(uint16_t) * count;
9360         desc.datalen = rte_cpu_to_le_16(buff_len);
9361         if (buff_len > 0)
9362                 desc.flags |= rte_cpu_to_le_16(
9363                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
9364         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9365                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9366         cmd.num_entries = rte_cpu_to_le_16(count);
9367         cmd.seid = rte_cpu_to_le_16(seid);
9368         cmd.destination = rte_cpu_to_le_16(dst_id);
9369
9370         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9371         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
9372         PMD_DRV_LOG(INFO,
9373                 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
9374                 hw->aq.asq_last_status, resp->rule_id,
9375                 resp->mirror_rules_used, resp->mirror_rules_free);
9376         *rule_id = rte_le_to_cpu_16(resp->rule_id);
9377
9378         return status;
9379 }
9380
9381 /**
9382  * i40e_aq_del_mirror_rule
9383  * @hw: pointer to the hardware structure
9384  * @seid: VEB seid to delete the mirror rule from
9385  * @entries: Buffer which contains the entities to be mirrored
9386  * @count: number of entities contained in the buffer
9387  * @rule_id: the rule ID of the rule to be deleted
9388  *
9389  * Delete a mirror rule for a given veb.
9390  *
9391  **/
9392 static enum i40e_status_code
9393 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
9394                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
9395                 uint16_t count, uint16_t rule_id)
9396 {
9397         struct i40e_aq_desc desc;
9398         struct i40e_aqc_add_delete_mirror_rule cmd;
9399         uint16_t buff_len = 0;
9400         enum i40e_status_code status;
9401         void *buff = NULL;
9402
9403         i40e_fill_default_direct_cmd_desc(&desc,
9404                                           i40e_aqc_opc_delete_mirror_rule);
9405         memset(&cmd, 0, sizeof(cmd));
9406         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
9407                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
9408                                                           I40E_AQ_FLAG_RD));
9409                 cmd.num_entries = count;
9410                 buff_len = sizeof(uint16_t) * count;
9411                 desc.datalen = rte_cpu_to_le_16(buff_len);
9412                 buff = (void *)entries;
9413         } else
9414                 /* rule id is filled in destination field for deleting mirror rule */
9415                 cmd.destination = rte_cpu_to_le_16(rule_id);
9416
9417         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
9418                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
9419         cmd.seid = rte_cpu_to_le_16(seid);
9420
9421         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
9422         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
9423
9424         return status;
9425 }
9426
9427 /**
9428  * i40e_mirror_rule_set
9429  * @dev: pointer to the Ethernet device structure
9430  * @mirror_conf: mirror rule info
9431  * @sw_id: mirror rule's sw_id
9432  * @on: enable/disable
9433  *
9434  * set a mirror rule.
9435  *
9436  **/
9437 static int
9438 i40e_mirror_rule_set(struct rte_eth_dev *dev,
9439                         struct rte_eth_mirror_conf *mirror_conf,
9440                         uint8_t sw_id, uint8_t on)
9441 {
9442         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9443         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9444         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9445         struct i40e_mirror_rule *parent = NULL;
9446         uint16_t seid, dst_seid, rule_id;
9447         uint16_t i, j = 0;
9448         int ret;
9449
9450         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
9451
9452         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
9453                 PMD_DRV_LOG(ERR,
9454                         "mirror rule cannot be configured without VEB or VFs.");
9455                 return -ENOSYS;
9456         }
9457         if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
9458                 PMD_DRV_LOG(ERR, "mirror table is full.");
9459                 return -ENOSPC;
9460         }
9461         if (mirror_conf->dst_pool > pf->vf_num) {
9462                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
9463                                  mirror_conf->dst_pool);
9464                 return -EINVAL;
9465         }
9466
9467         seid = pf->main_vsi->veb->seid;
9468
9469         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9470                 if (sw_id <= it->index) {
9471                         mirr_rule = it;
9472                         break;
9473                 }
9474                 parent = it;
9475         }
9476         if (mirr_rule && sw_id == mirr_rule->index) {
9477                 if (on) {
9478                         PMD_DRV_LOG(ERR, "mirror rule exists.");
9479                         return -EEXIST;
9480                 } else {
9481                         ret = i40e_aq_del_mirror_rule(hw, seid,
9482                                         mirr_rule->rule_type,
9483                                         mirr_rule->entries,
9484                                         mirr_rule->num_entries, mirr_rule->id);
9485                         if (ret < 0) {
9486                                 PMD_DRV_LOG(ERR,
9487                                         "failed to remove mirror rule: ret = %d, aq_err = %d.",
9488                                         ret, hw->aq.asq_last_status);
9489                                 return -ENOSYS;
9490                         }
9491                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9492                         rte_free(mirr_rule);
9493                         pf->nb_mirror_rule--;
9494                         return 0;
9495                 }
9496         } else if (!on) {
9497                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9498                 return -ENOENT;
9499         }
9500
9501         mirr_rule = rte_zmalloc("i40e_mirror_rule",
9502                                 sizeof(struct i40e_mirror_rule) , 0);
9503         if (!mirr_rule) {
9504                 PMD_DRV_LOG(ERR, "failed to allocate memory");
9505                 return I40E_ERR_NO_MEMORY;
9506         }
9507         switch (mirror_conf->rule_type) {
9508         case ETH_MIRROR_VLAN:
9509                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
9510                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
9511                                 mirr_rule->entries[j] =
9512                                         mirror_conf->vlan.vlan_id[i];
9513                                 j++;
9514                         }
9515                 }
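                /* For example, a vlan_mask of 0x5 selects vlan_id[0] and
                 * vlan_id[2], yielding j == 2 mirror entries.
                 */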
9516                 if (j == 0) {
9517                         PMD_DRV_LOG(ERR, "vlan is not specified.");
9518                         rte_free(mirr_rule);
9519                         return -EINVAL;
9520                 }
9521                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
9522                 break;
9523         case ETH_MIRROR_VIRTUAL_POOL_UP:
9524         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
9525                 /* check if the specified pool bit is out of range */
9526                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
9527                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
9528                         rte_free(mirr_rule);
9529                         return -EINVAL;
9530                 }
9531                 for (i = 0, j = 0; i < pf->vf_num; i++) {
9532                         if (mirror_conf->pool_mask & (1ULL << i)) {
9533                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
9534                                 j++;
9535                         }
9536                 }
9537                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
9538                         /* add pf vsi to entries */
9539                         mirr_rule->entries[j] = pf->main_vsi_seid;
9540                         j++;
9541                 }
9542                 if (j == 0) {
9543                         PMD_DRV_LOG(ERR, "pool is not specified.");
9544                         rte_free(mirr_rule);
9545                         return -EINVAL;
9546                 }
9547                 /* in AQ commands, egress and ingress are relative to the switch, not the port */
9548                 mirr_rule->rule_type =
9549                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
9550                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
9551                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
9552                 break;
9553         case ETH_MIRROR_UPLINK_PORT:
9554                 /* in AQ commands, egress and ingress are relative to the switch, not the port */
9555                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
9556                 break;
9557         case ETH_MIRROR_DOWNLINK_PORT:
9558                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
9559                 break;
9560         default:
9561                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
9562                         mirror_conf->rule_type);
9563                 rte_free(mirr_rule);
9564                 return -EINVAL;
9565         }
9566
9567         /* If the dst_pool is equal to vf_num, consider it as PF */
9568         if (mirror_conf->dst_pool == pf->vf_num)
9569                 dst_seid = pf->main_vsi_seid;
9570         else
9571                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
9572
9573         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
9574                                       mirr_rule->rule_type, mirr_rule->entries,
9575                                       j, &rule_id);
9576         if (ret < 0) {
9577                 PMD_DRV_LOG(ERR,
9578                         "failed to add mirror rule: ret = %d, aq_err = %d.",
9579                         ret, hw->aq.asq_last_status);
9580                 rte_free(mirr_rule);
9581                 return -ENOSYS;
9582         }
9583
9584         mirr_rule->index = sw_id;
9585         mirr_rule->num_entries = j;
9586         mirr_rule->id = rule_id;
9587         mirr_rule->dst_vsi_seid = dst_seid;
9588
9589         if (parent)
9590                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
9591         else
9592                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
9593
9594         pf->nb_mirror_rule++;
9595         return 0;
9596 }
9597
9598 /**
9599  * i40e_mirror_rule_reset
9600  * @dev: pointer to the device
9601  * @sw_id: mirror rule's sw_id
9602  *
9603  * reset a mirror rule.
9604  *
9605  **/
9606 static int
9607 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
9608 {
9609         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9610         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9611         struct i40e_mirror_rule *it, *mirr_rule = NULL;
9612         uint16_t seid;
9613         int ret;
9614
9615         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
9616
9617         seid = pf->main_vsi->veb->seid;
9618
9619         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
9620                 if (sw_id == it->index) {
9621                         mirr_rule = it;
9622                         break;
9623                 }
9624         }
9625         if (mirr_rule) {
9626                 ret = i40e_aq_del_mirror_rule(hw, seid,
9627                                 mirr_rule->rule_type,
9628                                 mirr_rule->entries,
9629                                 mirr_rule->num_entries, mirr_rule->id);
9630                 if (ret < 0) {
9631                         PMD_DRV_LOG(ERR,
9632                                 "failed to remove mirror rule: status = %d, aq_err = %d.",
9633                                 ret, hw->aq.asq_last_status);
9634                         return -ENOSYS;
9635                 }
9636                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
9637                 rte_free(mirr_rule);
9638                 pf->nb_mirror_rule--;
9639         } else {
9640                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
9641                 return -ENOENT;
9642         }
9643         return 0;
9644 }
9645
9646 static uint64_t
9647 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
9648 {
9649         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9650         uint64_t systim_cycles;
9651
9652         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
9653         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
9654                         << 32;
9655
9656         return systim_cycles;
9657 }
9658
9659 static uint64_t
9660 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
9661 {
9662         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9663         uint64_t rx_tstamp;
9664
9665         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
9666         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
9667                         << 32;
9668
9669         return rx_tstamp;
9670 }
9671
9672 static uint64_t
9673 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
9674 {
9675         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9676         uint64_t tx_tstamp;
9677
9678         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
9679         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
9680                         << 32;
9681
9682         return tx_tstamp;
9683 }
9684
9685 static void
9686 i40e_start_timecounters(struct rte_eth_dev *dev)
9687 {
9688         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9689         struct i40e_adapter *adapter =
9690                         (struct i40e_adapter *)dev->data->dev_private;
9691         struct rte_eth_link link;
9692         uint32_t tsync_inc_l;
9693         uint32_t tsync_inc_h;
9694
9695         /* Get current link speed. */
9696         memset(&link, 0, sizeof(link));
9697         i40e_dev_link_update(dev, 1);
9698         rte_i40e_dev_atomic_read_link_status(dev, &link);
9699
9700         switch (link.link_speed) {
9701         case ETH_SPEED_NUM_40G:
9702                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
9703                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
9704                 break;
9705         case ETH_SPEED_NUM_10G:
9706                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
9707                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
9708                 break;
9709         case ETH_SPEED_NUM_1G:
9710                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
9711                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
9712                 break;
9713         default:
9714                 tsync_inc_l = 0x0;
9715                 tsync_inc_h = 0x0;
9716         }
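        /* The increment value is chosen per link speed (presumably so that the
         * PRTTSYN_TIME counter advances at a constant real-time rate regardless
         * of the underlying clock); unrecognized speeds leave the increment at
         * zero, i.e. the timesync clock does not run.
         */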
9717
9718         /* Set the timesync increment value. */
9719         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
9720         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
9721
9722         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
9723         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
9724         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
9725
9726         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9727         adapter->systime_tc.cc_shift = 0;
9728         adapter->systime_tc.nsec_mask = 0;
9729
9730         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9731         adapter->rx_tstamp_tc.cc_shift = 0;
9732         adapter->rx_tstamp_tc.nsec_mask = 0;
9733
9734         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
9735         adapter->tx_tstamp_tc.cc_shift = 0;
9736         adapter->tx_tstamp_tc.nsec_mask = 0;
9737 }
9738
9739 static int
9740 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
9741 {
9742         struct i40e_adapter *adapter =
9743                         (struct i40e_adapter *)dev->data->dev_private;
9744
9745         adapter->systime_tc.nsec += delta;
9746         adapter->rx_tstamp_tc.nsec += delta;
9747         adapter->tx_tstamp_tc.nsec += delta;
9748
9749         return 0;
9750 }
9751
9752 static int
9753 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
9754 {
9755         uint64_t ns;
9756         struct i40e_adapter *adapter =
9757                         (struct i40e_adapter *)dev->data->dev_private;
9758
9759         ns = rte_timespec_to_ns(ts);
9760
9761         /* Set the timecounters to a new value. */
9762         adapter->systime_tc.nsec = ns;
9763         adapter->rx_tstamp_tc.nsec = ns;
9764         adapter->tx_tstamp_tc.nsec = ns;
9765
9766         return 0;
9767 }
9768
9769 static int
9770 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
9771 {
9772         uint64_t ns, systime_cycles;
9773         struct i40e_adapter *adapter =
9774                         (struct i40e_adapter *)dev->data->dev_private;
9775
9776         systime_cycles = i40e_read_systime_cyclecounter(dev);
9777         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
9778         *ts = rte_ns_to_timespec(ns);
9779
9780         return 0;
9781 }
9782
9783 static int
9784 i40e_timesync_enable(struct rte_eth_dev *dev)
9785 {
9786         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9787         uint32_t tsync_ctl_l;
9788         uint32_t tsync_ctl_h;
9789
9790         /* Stop the timesync system time. */
9791         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
9792         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
9793         /* Reset the timesync system time value. */
9794         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
9795         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
9796
9797         i40e_start_timecounters(dev);
9798
9799         /* Clear timesync registers. */
9800         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
9801         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
9802         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
9803         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
9804         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
9805         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
9806
9807         /* Enable timestamping of PTP packets. */
9808         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
9809         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
9810
9811         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
9812         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
9813         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
9814
9815         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
9816         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
9817
9818         return 0;
9819 }
9820
9821 static int
9822 i40e_timesync_disable(struct rte_eth_dev *dev)
9823 {
9824         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9825         uint32_t tsync_ctl_l;
9826         uint32_t tsync_ctl_h;
9827
9828         /* Disable timestamping of transmitted PTP packets. */
9829         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
9830         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
9831
9832         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
9833         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
9834
9835         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
9836         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
9837
9838         /* Reset the timesync increment value. */
9839         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
9840         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
9841
9842         return 0;
9843 }
9844
9845 static int
9846 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
9847                                 struct timespec *timestamp, uint32_t flags)
9848 {
9849         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9850         struct i40e_adapter *adapter =
9851                 (struct i40e_adapter *)dev->data->dev_private;
9852
9853         uint32_t sync_status;
9854         uint32_t index = flags & 0x03;
9855         uint64_t rx_tstamp_cycles;
9856         uint64_t ns;
9857
9858         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
9859         if ((sync_status & (1 << index)) == 0)
9860                 return -EINVAL;
9861
9862         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
9863         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
9864         *timestamp = rte_ns_to_timespec(ns);
9865
9866         return 0;
9867 }
9868
9869 static int
9870 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
9871                                 struct timespec *timestamp)
9872 {
9873         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9874         struct i40e_adapter *adapter =
9875                 (struct i40e_adapter *)dev->data->dev_private;
9876
9877         uint32_t sync_status;
9878         uint64_t tx_tstamp_cycles;
9879         uint64_t ns;
9880
9881         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
9882         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
9883                 return -EINVAL;
9884
9885         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
9886         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
9887         *timestamp = rte_ns_to_timespec(ns);
9888
9889         return 0;
9890 }
9891
9892 /*
9893  * i40e_parse_dcb_configure - parse the DCB configuration from the user
9894  * @dev: the device being configured
9895  * @dcb_cfg: pointer to where the parsed configuration is stored
9896  * @tc_map: returned bit map of enabled traffic classes
9897  *
9898  * Returns 0 on success, negative value on failure
9899  */
9900 static int
9901 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
9902                          struct i40e_dcbx_config *dcb_cfg,
9903                          uint8_t *tc_map)
9904 {
9905         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
9906         uint8_t i, tc_bw, bw_lf;
9907
9908         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
9909
9910         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
9911         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
9912                 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
9913                 return -EINVAL;
9914         }
9915
9916         /* assume each tc has the same bw */
9917         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
9918         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9919                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
9920         /* to ensure the sum of tcbw is equal to 100 */
9921         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
9922         for (i = 0; i < bw_lf; i++)
9923                 dcb_cfg->etscfg.tcbwtable[i]++;
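        /* For example, with nb_tcs == 3: tc_bw = 33 and bw_lf = 1, so the
         * resulting table is {34, 33, 33}, which sums to 100.
         */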
9924
9925         /* assume each tc has the same Transmission Selection Algorithm */
9926         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9927                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
9928
9929         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
9930                 dcb_cfg->etscfg.prioritytable[i] =
9931                                 dcb_rx_conf->dcb_tc[i];
9932
9933         /* FW needs one App to configure HW */
9934         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
9935         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
9936         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
9937         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
9938
9939         if (dcb_rx_conf->nb_tcs == 0)
9940                 *tc_map = 1; /* tc0 only */
9941         else
9942                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
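        /* e.g. nb_tcs == 4 yields *tc_map == 0x0F, i.e. TC0..TC3 enabled */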
9943
9944         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
9945                 dcb_cfg->pfc.willing = 0;
9946                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
9947                 dcb_cfg->pfc.pfcenable = *tc_map;
9948         }
9949         return 0;
9950 }
9951
9952
9953 static enum i40e_status_code
9954 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
9955                               struct i40e_aqc_vsi_properties_data *info,
9956                               uint8_t enabled_tcmap)
9957 {
9958         enum i40e_status_code ret;
9959         int i, total_tc = 0;
9960         uint16_t qpnum_per_tc, bsf, qp_idx;
9961         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
9962         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
9963         uint16_t used_queues;
9964
9965         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
9966         if (ret != I40E_SUCCESS)
9967                 return ret;
9968
9969         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9970                 if (enabled_tcmap & (1 << i))
9971                         total_tc++;
9972         }
9973         if (total_tc == 0)
9974                 total_tc = 1;
9975         vsi->enabled_tc = enabled_tcmap;
9976
9977         /* Different VSI types have different numbers of queues assigned */
9978         if (vsi->type == I40E_VSI_MAIN)
9979                 used_queues = dev_data->nb_rx_queues -
9980                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
9981         else if (vsi->type == I40E_VSI_VMDQ2)
9982                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
9983         else {
9984                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
9985                 return I40E_ERR_NO_AVAILABLE_VSI;
9986         }
9987
9988         qpnum_per_tc = used_queues / total_tc;
9989         /* Number of queues per enabled TC */
9990         if (qpnum_per_tc == 0) {
9991                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
9992                 return I40E_ERR_INVALID_QP_ID;
9993         }
9994         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
9995                                 I40E_MAX_Q_PER_TC);
9996         bsf = rte_bsf32(qpnum_per_tc);
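        /* For example, used_queues == 10 and total_tc == 4 give
         * qpnum_per_tc == 2 (10 / 4 == 2, already a power of two) and
         * bsf == 1; each enabled TC is then mapped to 2 queue pairs.
         */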
9997
9998         /**
9999          * Configure TC and queue mapping parameters. For each enabled TC,
10000          * allocate qpnum_per_tc queues to that traffic class. Disabled TCs
10001          * are served by the default queue.
10002          */
10003         qp_idx = 0;
10004         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10005                 if (vsi->enabled_tc & (1 << i)) {
10006                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10007                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10008                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10009                         qp_idx += qpnum_per_tc;
10010                 } else
10011                         info->tc_mapping[i] = 0;
10012         }
10013
10014         /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
10015         if (vsi->type == I40E_VSI_SRIOV) {
10016                 info->mapping_flags |=
10017                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10018                 for (i = 0; i < vsi->nb_qps; i++)
10019                         info->queue_mapping[i] =
10020                                 rte_cpu_to_le_16(vsi->base_queue + i);
10021         } else {
10022                 info->mapping_flags |=
10023                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10024                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10025         }
10026         info->valid_sections |=
10027                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10028
10029         return I40E_SUCCESS;
10030 }
10031
10032 /*
10033  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10034  * @veb: VEB to be configured
10035  * @tc_map: enabled TC bitmap
10036  *
10037  * Returns 0 on success, negative value on failure
10038  */
10039 static enum i40e_status_code
10040 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10041 {
10042         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10043         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10044         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10045         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10046         enum i40e_status_code ret = I40E_SUCCESS;
10047         int i;
10048         uint32_t bw_max;
10049
10050         /* Nothing to do if the requested TC map matches the currently enabled TCs */
10051         if (veb->enabled_tc == tc_map)
10052                 return ret;
10053
10054         /* configure tc bandwidth */
10055         memset(&veb_bw, 0, sizeof(veb_bw));
10056         veb_bw.tc_valid_bits = tc_map;
10057         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10058         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10059                 if (tc_map & BIT_ULL(i))
10060                         veb_bw.tc_bw_share_credits[i] = 1;
10061         }
10062         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10063                                                    &veb_bw, NULL);
10064         if (ret) {
10065                 PMD_INIT_LOG(ERR,
10066                         "AQ command Config switch_comp BW allocation per TC failed = %d",
10067                         hw->aq.asq_last_status);
10068                 return ret;
10069         }
10070
10071         memset(&ets_query, 0, sizeof(ets_query));
10072         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10073                                                    &ets_query, NULL);
10074         if (ret != I40E_SUCCESS) {
10075                 PMD_DRV_LOG(ERR,
10076                         "Failed to get switch_comp ETS configuration %u",
10077                         hw->aq.asq_last_status);
10078                 return ret;
10079         }
10080         memset(&bw_query, 0, sizeof(bw_query));
10081         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10082                                                   &bw_query, NULL);
10083         if (ret != I40E_SUCCESS) {
10084                 PMD_DRV_LOG(ERR,
10085                         "Failed to get switch_comp bandwidth configuration %u",
10086                         hw->aq.asq_last_status);
10087                 return ret;
10088         }
10089
10090         /* store and print out BW info */
10091         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10092         veb->bw_info.bw_max = ets_query.tc_bw_max;
10093         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10094         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10095         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10096                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10097                      I40E_16_BIT_WIDTH);
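        /* bw_max packs one 4-bit maximum per TC: tc_bw_max[0] provides the low
         * 16 bits (TC0-TC3) and tc_bw_max[1] the high 16 bits (TC4-TC7); the
         * loop below extracts the low 3 bits of each nibble, as the 4th bit is
         * reserved.
         */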
10098         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10099                 veb->bw_info.bw_ets_share_credits[i] =
10100                                 bw_query.tc_bw_share_credits[i];
10101                 veb->bw_info.bw_ets_credits[i] =
10102                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10103                 /* 4 bits per TC, 4th bit is reserved */
10104                 veb->bw_info.bw_ets_max[i] =
10105                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10106                                   RTE_LEN2MASK(3, uint8_t));
10107                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10108                             veb->bw_info.bw_ets_share_credits[i]);
10109                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10110                             veb->bw_info.bw_ets_credits[i]);
10111                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10112                             veb->bw_info.bw_ets_max[i]);
10113         }
10114
10115         veb->enabled_tc = tc_map;
10116
10117         return ret;
10118 }
10119
10120
10121 /*
10122  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10123  * @vsi: VSI to be configured
10124  * @tc_map: enabled TC bitmap
10125  *
10126  * Returns 0 on success, negative value on failure
10127  */
10128 static enum i40e_status_code
10129 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10130 {
10131         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10132         struct i40e_vsi_context ctxt;
10133         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10134         enum i40e_status_code ret = I40E_SUCCESS;
10135         int i;
10136
10137         /* Nothing to do if the requested TC map matches the currently enabled TCs */
10138         if (vsi->enabled_tc == tc_map)
10139                 return ret;
10140
10141         /* configure tc bandwidth */
10142         memset(&bw_data, 0, sizeof(bw_data));
10143         bw_data.tc_valid_bits = tc_map;
10144         /* Enable ETS TCs with equal BW Share for now across all VSIs */
10145         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10146                 if (tc_map & BIT_ULL(i))
10147                         bw_data.tc_bw_credits[i] = 1;
10148         }
10149         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10150         if (ret) {
10151                 PMD_INIT_LOG(ERR,
10152                         "AQ command Config VSI BW allocation per TC failed = %d",
10153                         hw->aq.asq_last_status);
10154                 goto out;
10155         }
10156         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10157                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10158
10159         /* Update Queue Pairs Mapping for currently enabled UPs */
10160         ctxt.seid = vsi->seid;
10161         ctxt.pf_num = hw->pf_id;
10162         ctxt.vf_num = 0;
10163         ctxt.uplink_seid = vsi->uplink_seid;
10164         ctxt.info = vsi->info;
10165         i40e_get_cap(hw);
10166         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10167         if (ret)
10168                 goto out;
10169
10170         /* Update the VSI after updating the VSI queue-mapping information */
10171         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10172         if (ret) {
10173                 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10174                         hw->aq.asq_last_status);
10175                 goto out;
10176         }
10177         /* update the local VSI info with updated queue map */
10178         (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10179                                         sizeof(vsi->info.tc_mapping));
10180         (void)rte_memcpy(&vsi->info.queue_mapping,
10181                         &ctxt.info.queue_mapping,
10182                 sizeof(vsi->info.queue_mapping));
10183         vsi->info.mapping_flags = ctxt.info.mapping_flags;
10184         vsi->info.valid_sections = 0;
10185
10186         /* query and update current VSI BW information */
10187         ret = i40e_vsi_get_bw_config(vsi);
10188         if (ret) {
10189                 PMD_INIT_LOG(ERR,
10190                          "Failed updating vsi bw info, err %s aq_err %s",
10191                          i40e_stat_str(hw, ret),
10192                          i40e_aq_str(hw, hw->aq.asq_last_status));
10193                 goto out;
10194         }
10195
10196         vsi->enabled_tc = tc_map;
10197
10198 out:
10199         return ret;
10200 }
10201
10202 /*
10203  * i40e_dcb_hw_configure - program the dcb setting to hw
10204  * @pf: pf the configuration is taken on
10205  * @new_cfg: new configuration
10206  * @tc_map: enabled TC bitmap
10207  *
10208  * Returns 0 on success, negative value on failure
10209  */
10210 static enum i40e_status_code
10211 i40e_dcb_hw_configure(struct i40e_pf *pf,
10212                       struct i40e_dcbx_config *new_cfg,
10213                       uint8_t tc_map)
10214 {
10215         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10216         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10217         struct i40e_vsi *main_vsi = pf->main_vsi;
10218         struct i40e_vsi_list *vsi_list;
10219         enum i40e_status_code ret;
10220         int i;
10221         uint32_t val;
10222
10223         /* Use the FW API only if FW version >= v4.4 */
10224         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10225               (hw->aq.fw_maj_ver >= 5))) {
10226                 PMD_INIT_LOG(ERR,
10227                         "FW < v4.4, cannot use FW LLDP API to configure DCB");
10228                 return I40E_ERR_FIRMWARE_API_VERSION;
10229         }
10230
10231         /* Check if need reconfiguration */
10232         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10233                 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10234                 return I40E_SUCCESS;
10235         }
10236
10237         /* Copy the new config to the current config */
10238         *old_cfg = *new_cfg;
10239         old_cfg->etsrec = old_cfg->etscfg;
10240         ret = i40e_set_dcb_config(hw);
10241         if (ret) {
10242                 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10243                          i40e_stat_str(hw, ret),
10244                          i40e_aq_str(hw, hw->aq.asq_last_status));
10245                 return ret;
10246         }
10247         /* set receive Arbiter to RR mode and ETS scheme by default */
10248         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10249                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10250                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10251                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10252                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
10253                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10254                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10255                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10256                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10257                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10258                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10259                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10260                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10261         }
10262         /* get local mib to check whether it is configured correctly */
10263         /* IEEE mode */
10264         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10265         /* Get Local DCB Config */
10266         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10267                                      &hw->local_dcbx_config);
10268
10269         /* if a VEB has been created, update its TC configuration first */
10270         if (main_vsi->veb) {
10271                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10272                 if (ret)
10273                         PMD_INIT_LOG(WARNING,
10274                                  "Failed configuring TC for VEB seid=%d",
10275                                  main_vsi->veb->seid);
10276         }
10277         /* Update each VSI */
10278         i40e_vsi_config_tc(main_vsi, tc_map);
10279         if (main_vsi->veb) {
10280                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10281                         /* Besides the main VSI and VMDq VSIs, only enable the
10282                          * default TC for other VSIs
10283                          */
10284                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10285                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10286                                                          tc_map);
10287                         else
10288                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
10289                                                          I40E_DEFAULT_TCMAP);
10290                         if (ret)
10291                                 PMD_INIT_LOG(WARNING,
10292                                         "Failed configuring TC for VSI seid=%d",
10293                                         vsi_list->vsi->seid);
10294                         /* continue */
10295                 }
10296         }
10297         return I40E_SUCCESS;
10298 }
10299
10300 /*
10301  * i40e_dcb_init_configure - initial dcb config
10302  * @dev: device being configured
10303  * @sw_dcb: indicate whether dcb is sw configured or hw offload
10304  *
10305  * Returns 0 on success, negative value on failure
10306  */
10307 static int
10308 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10309 {
10310         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10311         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10312         int i, ret = 0;
10313
10314         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10315                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10316                 return -ENOTSUP;
10317         }
10318
10319         /* DCB initialization:
10320          * Update DCB configuration from the Firmware and configure
10321          * LLDP MIB change event.
10322          */
10323         if (sw_dcb == TRUE) {
10324                 ret = i40e_init_dcb(hw);
10325                 /* If the LLDP agent is stopped, i40e_init_dcb is expected to
10326                  * fail with an I40E_AQ_RC_EPERM admin queue status; otherwise
10327                  * it should return success.
10328                  */
10329                 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
10330                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
10331                         memset(&hw->local_dcbx_config, 0,
10332                                 sizeof(struct i40e_dcbx_config));
10333                         /* set dcb default configuration */
10334                         hw->local_dcbx_config.etscfg.willing = 0;
10335                         hw->local_dcbx_config.etscfg.maxtcs = 0;
10336                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10337                         hw->local_dcbx_config.etscfg.tsatable[0] =
10338                                                 I40E_IEEE_TSA_ETS;
10339                         /* all UPs mapping to TC0 */
10340                         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10341                                 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10342                         hw->local_dcbx_config.etsrec =
10343                                 hw->local_dcbx_config.etscfg;
10344                         hw->local_dcbx_config.pfc.willing = 0;
10345                         hw->local_dcbx_config.pfc.pfccap =
10346                                                 I40E_MAX_TRAFFIC_CLASS;
10347                         /* FW needs one App to configure HW */
10348                         hw->local_dcbx_config.numapps = 1;
10349                         hw->local_dcbx_config.app[0].selector =
10350                                                 I40E_APP_SEL_ETHTYPE;
10351                         hw->local_dcbx_config.app[0].priority = 3;
10352                         hw->local_dcbx_config.app[0].protocolid =
10353                                                 I40E_APP_PROTOID_FCOE;
10354                         ret = i40e_set_dcb_config(hw);
10355                         if (ret) {
10356                                 PMD_INIT_LOG(ERR,
10357                                         "default dcb config fails. err = %d, aq_err = %d.",
10358                                         ret, hw->aq.asq_last_status);
10359                                 return -ENOSYS;
10360                         }
10361                 } else {
10362                         PMD_INIT_LOG(ERR,
10363                                 "DCB initialization in FW fails, err = %d, aq_err = %d.",
10364                                 ret, hw->aq.asq_last_status);
10365                         return -ENOTSUP;
10366                 }
10367         } else {
10368                 ret = i40e_aq_start_lldp(hw, NULL);
10369                 if (ret != I40E_SUCCESS)
10370                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10371
10372                 ret = i40e_init_dcb(hw);
10373                 if (!ret) {
10374                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10375                                 PMD_INIT_LOG(ERR,
10376                                         "HW doesn't support DCBX offload.");
10377                                 return -ENOTSUP;
10378                         }
10379                 } else {
10380                         PMD_INIT_LOG(ERR,
10381                                 "DCBX configuration failed, err = %d, aq_err = %d.",
10382                                 ret, hw->aq.asq_last_status);
10383                         return -ENOTSUP;
10384                 }
10385         }
10386         return 0;
10387 }
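/*
 * Context for the sw_dcb path above: the PMD stops the firmware LLDP agent
 * during device init, so i40e_init_dcb() is expected to fail with adminq
 * status I40E_AQ_RC_EPERM and the driver then programs the single-TC default
 * config itself. A minimal sketch of that shutdown, assuming the base-driver
 * helper i40e_aq_stop_lldp(hw, shutdown_agent, cmd_details):
 *
 *	if (i40e_aq_stop_lldp(hw, TRUE, NULL) != I40E_SUCCESS)
 *		PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
 */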
10388
10389 /*
10390  * i40e_dcb_setup - setup dcb related config
10391  * @dev: device being configured
10392  *
10393  * Returns 0 on success, negative value on failure
10394  */
10395 static int
10396 i40e_dcb_setup(struct rte_eth_dev *dev)
10397 {
10398         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10399         struct i40e_dcbx_config dcb_cfg;
10400         uint8_t tc_map = 0;
10401         int ret = 0;
10402
10403         if ((pf->flags & I40E_FLAG_DCB) == 0) {
10404                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10405                 return -ENOTSUP;
10406         }
10407
10408         if (pf->vf_num != 0)
10409                 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
10410
10411         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
10412         if (ret) {
10413                 PMD_INIT_LOG(ERR, "invalid dcb config");
10414                 return -EINVAL;
10415         }
10416         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
10417         if (ret) {
10418                 PMD_INIT_LOG(ERR, "dcb sw configure fails");
10419                 return -ENOSYS;
10420         }
10421
10422         return 0;
10423 }
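/*
 * Application-side sketch (illustrative, not part of the driver) of how DCB
 * is requested so that i40e_dcb_setup() runs with a multi-TC map. Field and
 * macro names assume the generic ethdev DCB configuration of this release
 * (ETH_MQ_RX_DCB_RSS, rte_eth_dcb_rx_conf); port_id, nb_rxq and nb_txq are
 * placeholders:
 *
 *	struct rte_eth_conf conf;
 *	int i;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
 *	conf.txmode.mq_mode = ETH_MQ_TX_DCB;
 *	conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
 *	conf.tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_4_TCS;
 *	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 *		conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % ETH_4_TCS;
 *		conf.tx_adv_conf.dcb_tx_conf.dcb_tc[i] = i % ETH_4_TCS;
 *	}
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */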
10424
10425 static int
10426 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
10427                       struct rte_eth_dcb_info *dcb_info)
10428 {
10429         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10430         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10431         struct i40e_vsi *vsi = pf->main_vsi;
10432         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
10433         uint16_t bsf, tc_mapping;
10434         int i, j = 0;
10435
10436         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
10437                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
10438         else
10439                 dcb_info->nb_tcs = 1;
10440         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10441                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
10442         for (i = 0; i < dcb_info->nb_tcs; i++)
10443                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
10444
10445         /* get queue mapping if vmdq is disabled */
10446         if (!pf->nb_cfg_vmdq_vsi) {
10447                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10448                         if (!(vsi->enabled_tc & (1 << i)))
10449                                 continue;
10450                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10451                         dcb_info->tc_queue.tc_rxq[j][i].base =
10452                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10453                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10454                         dcb_info->tc_queue.tc_txq[j][i].base =
10455                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10456                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10457                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10458                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10459                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10460                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10461                 }
10462                 return 0;
10463         }
10464
10465         /* get queue mapping if vmdq is enabled */
10466         do {
10467                 vsi = pf->vmdq[j].vsi;
10468                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10469                         if (!(vsi->enabled_tc & (1 << i)))
10470                                 continue;
10471                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
10472                         dcb_info->tc_queue.tc_rxq[j][i].base =
10473                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
10474                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
10475                         dcb_info->tc_queue.tc_txq[j][i].base =
10476                                 dcb_info->tc_queue.tc_rxq[j][i].base;
10477                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
10478                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
10479                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
10480                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
10481                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
10482                 }
10483                 j++;
10484         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
10485         return 0;
10486 }
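/*
 * Worked example of the tc_mapping decode used above: every 16-bit
 * tc_mapping word packs a first-queue offset and a power-of-two queue
 * count. If, say, the NUMBER field holds 3 and the OFFSET field holds 8,
 * the traffic class owns 1 << 3 = 8 queues starting at queue 8:
 *
 *	uint16_t base = (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
 *			I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
 *	uint16_t bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
 *			I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
 *	uint16_t nb_queue = 1 << bsf;
 */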
10487
10488 static int
10489 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
10490 {
10491         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10492         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10493         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10494         uint16_t interval =
10495                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
10496         uint16_t msix_intr;
10497
10498         msix_intr = intr_handle->intr_vec[queue_id];
10499         if (msix_intr == I40E_MISC_VEC_ID)
10500                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
10501                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10502                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10503                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10504                                (interval <<
10505                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10506         else
10507                 I40E_WRITE_REG(hw,
10508                                I40E_PFINT_DYN_CTLN(msix_intr -
10509                                                    I40E_RX_VEC_START),
10510                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
10511                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
10512                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
10513                                (interval <<
10514                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
10515
10516         I40E_WRITE_FLUSH(hw);
10517         rte_intr_enable(&pci_dev->intr_handle);
10518
10519         return 0;
10520 }
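/*
 * Application-side sketch (illustrative) of the Rx interrupt flow that lands
 * in the enable/disable callbacks above. It assumes intr_conf.rxq was set at
 * configure time and uses the EAL epoll helpers of this release; port_id and
 * queue_id are placeholders. After rte_epoll_wait() returns, the interrupt is
 * masked again and the queue is drained with rte_eth_rx_burst():
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */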
10521
10522 static int
10523 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
10524 {
10525         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10526         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
10527         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10528         uint16_t msix_intr;
10529
10530         msix_intr = intr_handle->intr_vec[queue_id];
10531         if (msix_intr == I40E_MISC_VEC_ID)
10532                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
10533         else
10534                 I40E_WRITE_REG(hw,
10535                                I40E_PFINT_DYN_CTLN(msix_intr -
10536                                                    I40E_RX_VEC_START),
10537                                0);
10538         I40E_WRITE_FLUSH(hw);
10539
10540         return 0;
10541 }
10542
10543 static int i40e_get_regs(struct rte_eth_dev *dev,
10544                          struct rte_dev_reg_info *regs)
10545 {
10546         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10547         uint32_t *ptr_data = regs->data;
10548         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
10549         const struct i40e_reg_info *reg_info;
10550
10551         if (ptr_data == NULL) {
10552                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
10553                 regs->width = sizeof(uint32_t);
10554                 return 0;
10555         }
10556
10557         /* The first few registers have to be read using AQ operations */
10558         reg_idx = 0;
10559         while (i40e_regs_adminq[reg_idx].name) {
10560                 reg_info = &i40e_regs_adminq[reg_idx++];
10561                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10562                         for (arr_idx2 = 0;
10563                                         arr_idx2 <= reg_info->count2;
10564                                         arr_idx2++) {
10565                                 reg_offset = arr_idx * reg_info->stride1 +
10566                                         arr_idx2 * reg_info->stride2;
10567                                 reg_offset += reg_info->base_addr;
10568                                 ptr_data[reg_offset >> 2] =
10569                                         i40e_read_rx_ctl(hw, reg_offset);
10570                         }
10571         }
10572
10573         /* The remaining registers can be read using primitives */
10574         reg_idx = 0;
10575         while (i40e_regs_others[reg_idx].name) {
10576                 reg_info = &i40e_regs_others[reg_idx++];
10577                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
10578                         for (arr_idx2 = 0;
10579                                         arr_idx2 <= reg_info->count2;
10580                                         arr_idx2++) {
10581                                 reg_offset = arr_idx * reg_info->stride1 +
10582                                         arr_idx2 * reg_info->stride2;
10583                                 reg_offset += reg_info->base_addr;
10584                                 ptr_data[reg_offset >> 2] =
10585                                         I40E_READ_REG(hw, reg_offset);
10586                         }
10587         }
10588
10589         return 0;
10590 }
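/*
 * Usage sketch (illustrative) of the two-call contract implemented above: a
 * NULL data pointer only reports the snapshot size, a second call fills the
 * buffer. It assumes the generic rte_eth_dev_get_reg_info() wrapper and a
 * placeholder port_id:
 *
 *	struct rte_dev_reg_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = malloc(info.length * info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */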
10591
10592 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
10593 {
10594         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10595
10596         /* Convert word count to byte count */
10597         return hw->nvm.sr_size << 1;
10598 }
10599
10600 static int i40e_get_eeprom(struct rte_eth_dev *dev,
10601                            struct rte_dev_eeprom_info *eeprom)
10602 {
10603         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10604         uint16_t *data = eeprom->data;
10605         uint16_t offset, length, cnt_words;
10606         int ret_code;
10607
10608         offset = eeprom->offset >> 1;
10609         length = eeprom->length >> 1;
10610         cnt_words = length;
10611
10612         if (offset > hw->nvm.sr_size ||
10613                 offset + length > hw->nvm.sr_size) {
10614                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
10615                 return -EINVAL;
10616         }
10617
10618         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
10619
10620         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
10621         if (ret_code != I40E_SUCCESS || cnt_words != length) {
10622                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
10623                 return -EIO;
10624         }
10625
10626         return 0;
10627 }
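/*
 * Usage sketch (illustrative): the NVM is exposed through the generic ethdev
 * EEPROM calls. i40e_get_eeprom_length() above reports bytes (shadow RAM
 * words shifted left by one) and i40e_get_eeprom() halves offset/length back
 * into words, so byte offsets passed in should be even. Assumes
 * rte_eth_dev_get_eeprom_length() and rte_eth_dev_get_eeprom():
 *
 *	struct rte_dev_eeprom_info info;
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.length = len;
 *	info.data = malloc(len);
 *	rte_eth_dev_get_eeprom(port_id, &info);
 */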
10628
10629 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
10630                                       struct ether_addr *mac_addr)
10631 {
10632         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10633
10634         if (!is_valid_assigned_ether_addr(mac_addr)) {
10635                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
10636                 return;
10637         }
10638
10639         /* Flags: 0x3 updates port address */
10640         i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
10641 }
10642
10643 static int
10644 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
10645 {
10646         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10647         struct rte_eth_dev_data *dev_data = pf->dev_data;
10648         uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
10649         int ret = 0;
10650
10651         /* check if mtu is within the allowed range */
10652         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
10653                 return -EINVAL;
10654
10655         /* mtu setting is forbidden if port is started */
10656         if (dev_data->dev_started) {
10657                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
10658                             dev_data->port_id);
10659                 return -EBUSY;
10660         }
10661
10662         if (frame_size > ETHER_MAX_LEN)
10663                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
10664         else
10665                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
10666
10667         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
10668
10669         return ret;
10670 }
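/*
 * Usage sketch (illustrative): the callback above rejects a running port
 * with -EBUSY, so the MTU has to be changed while the port is stopped. A
 * frame size above ETHER_MAX_LEN implicitly enables jumbo_frame Rx mode:
 *
 *	rte_eth_dev_stop(port_id);
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set MTU\n");
 *	rte_eth_dev_start(port_id);
 */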
10671
10672 /* Restore ethertype filter */
10673 static void
10674 i40e_ethertype_filter_restore(struct i40e_pf *pf)
10675 {
10676         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10677         struct i40e_ethertype_filter_list
10678                 *ethertype_list = &pf->ethertype.ethertype_list;
10679         struct i40e_ethertype_filter *f;
10680         struct i40e_control_filter_stats stats = {0};
10681         uint16_t flags;
10682
10683         TAILQ_FOREACH(f, ethertype_list, rules) {
10684                 flags = 0;
10685                 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
10686                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10687                 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
10688                         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10689                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10690
10691                 memset(&stats, 0, sizeof(stats));
10692                 i40e_aq_add_rem_control_packet_filter(hw,
10693                                             f->input.mac_addr.addr_bytes,
10694                                             f->input.ether_type,
10695                                             flags, pf->main_vsi->seid,
10696                                             f->queue, 1, &stats, NULL);
10697         }
10698         PMD_DRV_LOG(INFO, "Ethertype filter:"
10699                     " mac_etype_used = %u, etype_used = %u,"
10700                     " mac_etype_free = %u, etype_free = %u",
10701                     stats.mac_etype_used, stats.etype_used,
10702                     stats.mac_etype_free, stats.etype_free);
10703 }
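/*
 * The filters replayed above were added through the ethertype filter API; a
 * minimal sketch of one such filter, assuming the rte_eth_dev_filter_ctrl()
 * path of this release and matching ARP frames on queue 1:
 *
 *	struct rte_eth_ethertype_filter filter;
 *
 *	memset(&filter, 0, sizeof(filter));
 *	filter.ether_type = ETHER_TYPE_ARP;
 *	filter.queue = 1;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *				RTE_ETH_FILTER_ADD, &filter);
 */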
10704
10705 /* Restore tunnel filter */
10706 static void
10707 i40e_tunnel_filter_restore(struct i40e_pf *pf)
10708 {
10709         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10710         struct i40e_vsi *vsi;
10711         struct i40e_pf_vf *vf;
10712         struct i40e_tunnel_filter_list
10713                 *tunnel_list = &pf->tunnel.tunnel_list;
10714         struct i40e_tunnel_filter *f;
10715         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
10716         bool big_buffer = 0;
10717
10718         TAILQ_FOREACH(f, tunnel_list, rules) {
10719                 if (!f->is_to_vf)
10720                         vsi = pf->main_vsi;
10721                 else {
10722                         vf = &pf->vfs[f->vf_id];
10723                         vsi = vf->vsi;
10724                 }
10725                 memset(&cld_filter, 0, sizeof(cld_filter));
10726                 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
10727                         (struct ether_addr *)&cld_filter.element.outer_mac);
10728                 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
10729                         (struct ether_addr *)&cld_filter.element.inner_mac);
10730                 cld_filter.element.inner_vlan = f->input.inner_vlan;
10731                 cld_filter.element.flags = f->input.flags;
10732                 cld_filter.element.tenant_id = f->input.tenant_id;
10733                 cld_filter.element.queue_number = f->queue;
10734                 rte_memcpy(cld_filter.general_fields,
10735                            f->input.general_fields,
10736                            sizeof(f->input.general_fields));
10737
10738                 if (((f->input.flags &
10739                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
10740                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
10741                     ((f->input.flags &
10742                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
10743                      I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
10744                     ((f->input.flags &
10745                      I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
10746                      I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
10747                         big_buffer = 1;
10748
10749                 if (big_buffer)
10750                         i40e_aq_add_cloud_filters_big_buffer(hw,
10751                                              vsi->seid, &cld_filter, 1);
10752                 else
10753                         i40e_aq_add_cloud_filters(hw, vsi->seid,
10754                                                   &cld_filter.element, 1);
10755         }
10756 }
10757
10758 static void
10759 i40e_filter_restore(struct i40e_pf *pf)
10760 {
10761         i40e_ethertype_filter_restore(pf);
10762         i40e_tunnel_filter_restore(pf);
10763         i40e_fdir_filter_restore(pf);
10764 }
10765
10766 static bool
10767 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
10768 {
10769         if (strcmp(dev->device->driver->name, drv->driver.name))
10770                 return false;
10771
10772         return true;
10773 }
10774
10775 bool
10776 is_i40e_supported(struct rte_eth_dev *dev)
10777 {
10778         return is_device_supported(dev, &rte_i40e_pmd);
10779 }
10780
10781 /* Create a QinQ cloud filter
10782  *
10783  * The Fortville NIC has limited resources for tunnel filters,
10784  * so we can only reuse existing filters.
10785  *
10786  * In step 1 we define which Field Vector fields can be used for
10787  * filter types.
10788  * As we do not have the inner tag defined as a field,
10789  * we have to define it first, by reusing one of the L1 entries.
10790  *
10791  * In step 2 we replace one of the existing filter types with
10792  * a new one for QinQ.
10793  * As we reuse an L1 entry and replace an L2 entry, some of the
10794  * default filter types disappear, depending on which entries we reuse.
10795  *
10796  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
10797  *
10798  * 1.   Create L1 filter of outer vlan (12b) which will be in use
10799  *              later when we define the cloud filter.
10800  *      a.      Valid_flags.replace_cloud = 0
10801  *      b.      Old_filter = 10 (Stag_Inner_Vlan)
10802  *      c.      New_filter = 0x10
10803  *      d.      TR bit = 0xff (optional, not used here)
10804  *      e.      Buffer – 2 entries:
10805  *              i.      Byte 0 = 8 (outer vlan FV index).
10806  *                      Byte 1 = 0 (rsv)
10807  *                      Byte 2-3 = 0x0fff
10808  *              ii.     Byte 0 = 37 (inner vlan FV index).
10809  *                      Byte 1 = 0 (rsv)
10810  *                      Byte 2-3 = 0x0fff
10811  *
10812  * Step 2:
10813  * 2.   Create cloud filter using two L1 filters entries: stag and
10814  *              new filter(outer vlan+ inner vlan)
10815  *      a.      Valid_flags.replace_cloud = 1
10816  *      b.      Old_filter = 1 (instead of outer IP)
10817  *      c.      New_filter = 0x10
10818  *      d.      Buffer – 2 entries:
10819  *              i.      Byte 0 = 0x80 | 7 (valid | Stag).
10820  *                      Byte 1-3 = 0 (rsv)
10821  *              ii.     Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
10822  *                      Byte 9-11 = 0 (rsv)
10823  */
10824 static int
10825 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
10826 {
10827         int ret = -ENOTSUP;
10828         struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
10829         struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
10830         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10831
10832         /* Init */
10833         memset(&filter_replace, 0,
10834                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
10835         memset(&filter_replace_buf, 0,
10836                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
10837
10838         /* create L1 filter */
10839         filter_replace.old_filter_type =
10840                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
10841         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
10842         filter_replace.tr_bit = 0;
10843
10844         /* Prepare the buffer, 2 entries */
10845         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
10846         filter_replace_buf.data[0] |=
10847                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10848         /* Field Vector 12b mask */
10849         filter_replace_buf.data[2] = 0xff;
10850         filter_replace_buf.data[3] = 0x0f;
10851         filter_replace_buf.data[4] =
10852                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
10853         filter_replace_buf.data[4] |=
10854                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10855         /* Field Vector 12b mask */
10856         filter_replace_buf.data[6] = 0xff;
10857         filter_replace_buf.data[7] = 0x0f;
10858         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
10859                         &filter_replace_buf);
10860         if (ret != I40E_SUCCESS)
10861                 return ret;
10862
10863         /* Apply the second L2 cloud filter */
10864         memset(&filter_replace, 0,
10865                sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
10866         memset(&filter_replace_buf, 0,
10867                sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
10868
10869         /* create L2 filter, input for L2 filter will be L1 filter */
10870         filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
10871         filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
10872         filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
10873
10874         /* Prepare the buffer, 2 entries */
10875         filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
10876         filter_replace_buf.data[0] |=
10877                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10878         filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
10879         filter_replace_buf.data[4] |=
10880                 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
10881         ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
10882                         &filter_replace_buf);
10883         return ret;
10884 }
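/*
 * Once the replacement above has succeeded, a QinQ flow can be steered with
 * the generic flow API; a sketch assuming the i40e rte_flow pattern
 * eth / vlan / vlan for this filter type, with placeholder VLAN IDs 100/200,
 * queue 1 and port_id:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_vlan outer = { .tci = rte_cpu_to_be_16(100) };
 *	struct rte_flow_item_vlan inner = { .tci = rte_cpu_to_be_16(200) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &outer },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &inner },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	rte_flow_create(port_id, &attr, pattern, actions, &err);
 */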
10885
10886 RTE_INIT(i40e_init_log);
10887 static void
10888 i40e_init_log(void)
10889 {
10890         i40e_logtype_init = rte_log_register("pmd.i40e.init");
10891         if (i40e_logtype_init >= 0)
10892                 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
10893         i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
10894         if (i40e_logtype_driver >= 0)
10895                 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
10896 }
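/*
 * The two dynamic log types registered above default to NOTICE. More verbose
 * driver output can be requested programmatically, or (syntax assumed for
 * this release's EAL) from the command line:
 *
 *	rte_log_set_level(i40e_logtype_driver, RTE_LOG_DEBUG);
 *
 * or:	--log-level=pmd.i40e.driver,8
 */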