net/i40e: fix DCB configuration
dpdk.git: drivers/net/i40e/i40e_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
#include "i40e_regs.h"

#define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"

#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Receive average packet size in bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
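
/*
 * Illustrative sketch (an assumption, not part of the driver proper):
 * the mask above is what i40e_pf_config_irq0() programs into
 * PFINT_ICR0_ENA to unmask the PF-level interrupt causes, roughly:
 *
 *	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
 *	I40E_WRITE_FLUSH(hw);
 */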

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
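
/*
 * Illustrative sketch, assuming a helper of this shape (the name is
 * hypothetical; the driver open-codes this test): an ethdev flow type
 * is supported for hash/flow-director purposes only if its bit is set
 * in the bitmap above.
 *
 *	static inline bool
 *	i40e_flow_type_supported(uint16_t flow_type)
 *	{
 *		return flow_type < 64 &&
 *			(I40E_FLOW_TYPES & (1ULL << flow_type)) != 0;
 *	}
 */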

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
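
/*
 * Illustrative sketch (a simplified selection, assumed here; the real
 * one lives in the timecounter start path): the increment value loaded
 * into the PRTTSYN_INC registers is chosen from the link speed.
 *
 *	switch (link.link_speed) {
 *	case ETH_SPEED_NUM_40G:
 *		tsync_incval = I40E_PTP_40GB_INCVAL;
 *		break;
 *	case ETH_SPEED_NUM_10G:
 *		tsync_incval = I40E_PTP_10GB_INCVAL;
 *		break;
 *	case ETH_SPEED_NUM_1G:
 *		tsync_incval = I40E_PTP_1GB_INCVAL;
 *		break;
 *	default:
 *		tsync_incval = 0;
 *		break;
 *	}
 */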

#define I40E_MAX_PERCENT            100
#define I40E_DEFAULT_DCB_APP_NUM    1
#define I40E_DEFAULT_DCB_APP_PRIO   3

#define I40E_INSET_NONE            0x0000000000000000ULL

/* bit 0 ~ bit 7 */
#define I40E_INSET_DMAC            0x0000000000000001ULL
#define I40E_INSET_SMAC            0x0000000000000002ULL
#define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
#define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
#define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL

/* bit 8 ~ bit 15 */
#define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
#define I40E_INSET_IPV4_DST        0x0000000000000200ULL
#define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
#define I40E_INSET_IPV6_DST        0x0000000000000800ULL
#define I40E_INSET_SRC_PORT        0x0000000000001000ULL
#define I40E_INSET_DST_PORT        0x0000000000002000ULL
#define I40E_INSET_SCTP_VT         0x0000000000004000ULL

/* bit 16 ~ bit 31 */
#define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
#define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
#define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
#define I40E_INSET_IPV6_TC         0x0000000000080000ULL
#define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
#define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
#define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
#define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL

/* bit 32 ~ bit 47, tunnel fields */
#define I40E_INSET_TUNNEL_IPV4_DST       0x0000000100000000ULL
#define I40E_INSET_TUNNEL_IPV6_DST       0x0000000200000000ULL
#define I40E_INSET_TUNNEL_DMAC           0x0000000400000000ULL
#define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
#define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
#define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL

/* bit 48 ~ bit 55 */
#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL

/* bit 56 ~ bit 63, Flex Payload */
#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD \
	(I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
	I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
	I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
	I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
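
/*
 * Illustrative example (an assumption, for exposition only): input-set
 * bits are OR-ed together to describe which packet fields feed the
 * hash/flow-director lookup, e.g. a plausible set for non-fragmented
 * IPv4/UDP flows:
 *
 *	uint64_t inset = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 *			 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;
 *
 * The authoritative per-pctype defaults are installed by
 * i40e_filter_input_set_init(), declared further down.
 */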

/**
 * Below are the values for writing non-exposed registers, as suggested
 * by silicon experts.
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
#define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* Source IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
/* Destination IPv4 address for X722 */
#define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
/* IPv4 Protocol for X722 */
#define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
/* IPv4 Time to Live for X722 */
#define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* IPv4 Time to Live */
#define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* IPv6 Hop Limit */
#define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
/* all 8 words of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
#define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
#define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
#define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL

#define I40E_GL_SWT_L2TAGCTRL(_i)             (0x001C0A70 + ((_i) * 4))
#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK  \
	I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
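
/*
 * Illustrative sketch of how the PCI offsets above can be used,
 * assuming the EAL config-space accessors of this release
 * (rte_eal_pci_read_config()/rte_eal_pci_write_config()); the real
 * Extended Tag handling is done elsewhere in the driver:
 *
 *	uint32_t buf = 0;
 *
 *	if (rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
 *				    PCI_DEV_CAP_REG) >= 0 &&
 *	    (buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
 *		rte_eal_pci_read_config(pci_dev, &buf, sizeof(buf),
 *					PCI_DEV_CTRL_REG);
 *		buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *		rte_eal_pci_write_config(pci_dev, &buf, sizeof(buf),
 *					 PCI_DEV_CTRL_REG);
 *	}
 */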

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
			       struct rte_eth_xstat *xstats, unsigned n);
static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
			      enum rte_vlan_type vlan_type,
			      uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr,
			     uint32_t index,
			     uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
				uint32_t hireg,
				uint32_t loreg,
				bool offset_loaded,
				uint64_t *offset,
				uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(
		__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
				 struct rte_eth_dcb_info *dcb_info);
static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					  uint16_t queue_id);

static int i40e_get_regs(struct rte_eth_dev *dev,
			 struct rte_dev_reg_info *regs);

static int i40e_get_eeprom_length(struct rte_eth_dev *dev);

static int i40e_get_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *eeprom);

static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);

static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static const struct rte_pci_id pci_id_i40e_map[] = {
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.xstats_get                   = i40e_dev_xstats_get,
	.xstats_get_names             = i40e_dev_xstats_get_names,
	.stats_reset                  = i40e_dev_stats_reset,
	.xstats_reset                 = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get                = i40e_dev_info_get,
	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_get                = i40e_flow_ctrl_get,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
	.rxq_info_get                 = i40e_rxq_info_get,
	.txq_info_get                 = i40e_txq_info_get,
	.mirror_rule_set              = i40e_mirror_rule_set,
	.mirror_rule_reset            = i40e_mirror_rule_reset,
	.timesync_enable              = i40e_timesync_enable,
	.timesync_disable             = i40e_timesync_disable,
	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
	.get_dcb_info                 = i40e_dev_get_dcb_info,
	.timesync_adjust_time         = i40e_timesync_adjust_time,
	.timesync_read_time           = i40e_timesync_read_time,
	.timesync_write_time          = i40e_timesync_write_time,
	.get_reg                      = i40e_get_regs,
	.get_eeprom_length            = i40e_get_eeprom_length,
	.get_eeprom                   = i40e_get_eeprom,
	.mac_addr_set                 = i40e_set_default_mac_addr,
	.mtu_set                      = i40e_dev_mtu_set,
};
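
/*
 * Illustrative note: the ethdev layer dispatches through the ops table
 * above, so an application call such as
 *
 *	ret = rte_eth_dev_start(port_id);
 *
 * resolves to i40e_dev_start() for ports bound to this PMD.
 */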

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
		sizeof(rte_i40e_stats_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
		mac_remote_faults)},
	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
		rx_length_errors)},
	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
		mac_short_packet_dropped)},
	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
		tx_size_big)},
	{"rx_flow_director_atr_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
	{"rx_flow_director_sb_match_packets",
		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		tx_lpi_status)},
	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
		rx_lpi_status)},
	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		tx_lpi_count)},
	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
		rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
		sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_rx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
		sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
	{"xon_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_tx)},
	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xoff_tx)},
	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
		priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
		sizeof(rte_i40e_txq_prio_strings[0]))
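
/*
 * Illustrative sketch, assuming a helper of this shape (the name is
 * hypothetical; the xstats get path open-codes the access): each
 * name/offset table above is walked the same way, with the recorded
 * offset indexing into the stats structure filled from hardware
 * counters.
 *
 *	static inline uint64_t
 *	i40e_xstats_value(const void *stats, unsigned int offset)
 *	{
 *		return *(const uint64_t *)((const char *)stats + offset);
 *	}
 */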

static struct eth_driver rte_i40e_pmd = {
	.pci_drv = {
		.id_table = pci_id_i40e_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.eth_dev_uninit = eth_i40e_dev_uninit,
	.dev_private_size = sizeof(struct i40e_adapter),
};

static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
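
/*
 * Illustrative usage (a sketch, assuming the link-update path): a local
 * struct rte_eth_link is filled from PHY state and then published
 * atomically, so readers always see a torn-free 64-bit snapshot:
 *
 *	struct rte_eth_link link;
 *
 *	memset(&link, 0, sizeof(link));
 *	link.link_status = ETH_LINK_UP;
 *	link.link_speed = ETH_SPEED_NUM_10G;
 *	link.link_duplex = ETH_LINK_FULL_DUPLEX;
 *	rte_i40e_dev_atomic_write_link_status(dev, &link);
 */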

RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);

#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif

static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
	/*
	 * Initialize registers for flexible payload, which should be set by NVM.
	 * This should be removed from the code once it is fixed in NVM.
	 */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);

	/* Initialize registers for parsing packet type of QinQ */
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
	int ret;

	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
				I40E_FLOW_CONTROL_ETHERTYPE, flags,
				pf->main_vsi_seid, 0,
				TRUE, NULL, NULL);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control"
			     " frames from VSIs.");
}

static int
floating_veb_list_handler(__rte_unused const char *key,
			  const char *floating_veb_value,
			  void *opaque)
{
	int idx = 0;
	unsigned int count = 0;
	char *end = NULL;
	int min, max;
	bool *vf_floating_veb = opaque;

	while (isblank(*floating_veb_value))
		floating_veb_value++;

	/* Reset floating VEB configuration for VFs */
	for (idx = 0; idx < I40E_MAX_VF; idx++)
		vf_floating_veb[idx] = false;

	min = I40E_MAX_VF;
	do {
		while (isblank(*floating_veb_value))
			floating_veb_value++;
		if (*floating_veb_value == '\0')
			return -1;
		errno = 0;
		idx = strtoul(floating_veb_value, &end, 10);
		if (errno || end == NULL)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ';') || (*end == '\0')) {
			max = idx;
			if (min == I40E_MAX_VF)
				min = idx;
			if (max >= I40E_MAX_VF)
				max = I40E_MAX_VF - 1;
			for (idx = min; idx <= max; idx++) {
				vf_floating_veb[idx] = true;
				count++;
			}
			min = I40E_MAX_VF;
		} else {
			return -1;
		}
		floating_veb_value = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	return 0;
}
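
/*
 * Example of the list syntax accepted above (values are VF indices,
 * ranges use '-', entries are separated by ';'); the exact devargs
 * string below is an illustrative assumption, e.g. as a whitelist arg:
 *
 *	-w 0000:02:00.0,enable_floating_veb=1,floating_veb_list=0;3-5;7
 *
 * would attach VFs 0, 3, 4, 5 and 7 to the floating VEB.
 */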

static void
config_vf_floating_veb(struct rte_devargs *devargs,
		       uint16_t floating_veb,
		       bool *vf_floating_veb)
{
	struct rte_kvargs *kvlist;
	int i;
	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;

	if (!floating_veb)
		return;
	/* All the VFs attach to the floating VEB by default
	 * when the floating VEB is enabled.
	 */
	for (i = 0; i < I40E_MAX_VF; i++)
		vf_floating_veb[i] = true;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
		rte_kvargs_free(kvlist);
		return;
	}
	/* When the floating_veb_list parameter exists, all the VFs
	 * first attach to the legacy VEB and are then moved to the
	 * floating VEB according to the floating_veb_list.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_list,
			       floating_veb_list_handler,
			       vf_floating_veb) < 0) {
		rte_kvargs_free(kvlist);
		return;
	}
	rte_kvargs_free(kvlist);
}

static int
i40e_check_floating_handler(__rte_unused const char *key,
			    const char *value,
			    __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
is_floating_veb_supported(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* Floating VEB is enabled when the key-value pair
	 * enable_floating_veb=1 exists.
	 */
	if (rte_kvargs_process(kvlist, floating_veb_key,
			       i40e_check_floating_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}

static void
config_floating_veb(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = dev->pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));

	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
		pf->floating_veb =
			is_floating_veb_supported(pci_dev->device.devargs);
		config_vf_floating_veb(pci_dev->device.devargs,
				       pf->floating_veb,
				       pf->floating_veb_list);
	} else {
		pf->floating_veb = false;
	}
}

#define I40E_L2_TAGS_S_TAG_SHIFT 1
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary has already done this work. Only check that we don't need
	 * a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		i40e_set_rx_function(dev);
		i40e_set_tx_function(dev);
		return 0;
	}
	pci_dev = dev->pci_dev;

	rte_eth_copy_pci_info(dev, pci_dev);

	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR, "Hardware is not available, "
			     "as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;
	hw->adapter_stopped = 0;

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(dev);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	/*
	 * To work around the NVM issue, initialize registers
	 * for flexible payload and packet type of QinQ by
	 * software. It should be removed once issues are fixed
	 * in NVM.
	 */
	i40e_GLQF_reg_init(hw);

	/* Initialize the input set for filters (hash and fd) to their default values */
	i40e_filter_input_set_init(pf);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	/* Need the special FW version to support floating VEB */
	config_floating_veb(dev);
	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);
	ret = i40e_dev_sync_phy_type(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
		goto err_sync_phy_type;
	}
	/*
	 * On X710, performance on recent firmware versions is far below
	 * expectations, and the fix for this issue may not be integrated in
	 * the next firmware release, so a workaround in the software driver
	 * is needed. It modifies the initial values of three internal-only
	 * registers. Note that the workaround can be removed once the issue
	 * is fixed in firmware.
	 */
	i40e_configure_registers(hw);

	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;
	}

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;
	}

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init queue pool");
		goto err_qp_pool_init;
	}
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.num_msix_vectors - 1);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;
	}

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;
	}

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		ret = -EIO;
		goto err_get_mac_addr;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
			(struct ether_addr *) hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* Set the global registers with default ether type value */
	ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to set the default outer "
			     "VLAN ether type");
		goto err_setup_pf_switch;
	}

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;
	}

	/* Reset all stats of the device, including pf and main vsi */
	i40e_dev_stats_reset(dev);

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	i40e_vsi_config_double_vlan(vsi, FALSE);

	/* Disable S-TAG identification by default */
	ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
	if (ret & I40E_L2_TAGS_S_TAG_MASK) {
		ret &= ~I40E_L2_TAGS_S_TAG_MASK;
		I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
	}

	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
	else
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory "
			     "for storing MAC address");
		goto err_mac_alloc;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
					&dev->data->mac_addrs[0]);

	/* Initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* Register callback func to eal lib */
	rte_intr_callback_register(&(pci_dev->intr_handle),
		i40e_dev_interrupt_handler, (void *)dev);

	/* Configure and enable device interrupt */
	i40e_pf_config_irq0(hw, TRUE);
	i40e_pf_enable_irq0(hw);

	/* Enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));
	/*
	 * Add an ethertype filter to drop all flow control frames transmitted
	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
	 * frames to wire.
	 */
	i40e_add_tx_flow_control_drop_filter(pf);

	/* Set the max frame size to 0x2600 by default,
	 * in case other drivers changed the default value.
	 */
	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);

	/* Initialize mirror rule list */
	TAILQ_INIT(&pf->mirror_list);

	/* Init dcb to sw mode by default */
	ret = i40e_dcb_init_configure(dev, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(INFO, "Failed to init dcb.");
		pf->flags &= ~I40E_FLAG_DCB;
	}

	return 0;

err_mac_alloc:
	i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
err_get_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
err_sync_phy_type:
	(void)i40e_shutdown_adminq(hw);

	return ret;
}
1201
1202 static int
1203 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1204 {
1205         struct rte_pci_device *pci_dev;
1206         struct i40e_hw *hw;
1207         struct i40e_filter_control_settings settings;
1208         int ret;
1209         uint8_t aq_fail = 0;
1210
1211         PMD_INIT_FUNC_TRACE();
1212
1213         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1214                 return 0;
1215
1216         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1217         pci_dev = dev->pci_dev;
1218
1219         if (hw->adapter_stopped == 0)
1220                 i40e_dev_close(dev);
1221
1222         dev->dev_ops = NULL;
1223         dev->rx_pkt_burst = NULL;
1224         dev->tx_pkt_burst = NULL;
1225
1226         /* Clear PXE mode */
1227         i40e_clear_pxe_mode(hw);
1228
1229         /* Unconfigure filter control */
1230         memset(&settings, 0, sizeof(settings));
1231         ret = i40e_set_filter_control(hw, &settings);
1232         if (ret)
1233                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1234                                         ret);
1235
1236         /* Disable flow control */
1237         hw->fc.requested_mode = I40E_FC_NONE;
1238         i40e_set_fc(hw, &aq_fail, TRUE);
1239
1240         /* uninitialize pf host driver */
1241         i40e_pf_host_uninit(dev);
1242
1243         rte_free(dev->data->mac_addrs);
1244         dev->data->mac_addrs = NULL;
1245
1246         /* disable uio intr before callback unregister */
1247         rte_intr_disable(&(pci_dev->intr_handle));
1248
1249         /* unregister callback func from eal lib */
1250         rte_intr_callback_unregister(&(pci_dev->intr_handle),
1251                 i40e_dev_interrupt_handler, (void *)dev);
1252
1253         return 0;
1254 }
1255
1256 static int
1257 i40e_dev_configure(struct rte_eth_dev *dev)
1258 {
1259         struct i40e_adapter *ad =
1260                 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1261         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1262         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1263         int i, ret;
1264
1265         /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1266          * allocation or vector Rx preconditions, it will be reset.
1267          */
1268         ad->rx_bulk_alloc_allowed = true;
1269         ad->rx_vec_allowed = true;
1270         ad->tx_simple_allowed = true;
1271         ad->tx_vec_allowed = true;
1272
1273         if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1274                 ret = i40e_fdir_setup(pf);
1275                 if (ret != I40E_SUCCESS) {
1276                         PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1277                         return -ENOTSUP;
1278                 }
1279                 ret = i40e_fdir_configure(dev);
1280                 if (ret < 0) {
1281                         PMD_DRV_LOG(ERR, "failed to configure fdir.");
1282                         goto err;
1283                 }
1284         } else
1285                 i40e_fdir_teardown(pf);
1286
1287         ret = i40e_dev_init_vlan(dev);
1288         if (ret < 0)
1289                 goto err;
1290
1291         /* VMDQ setup.
1292          *  VMDQ setting needs to stay outside of i40e_pf_config_mq_rx(), as
1293          *  VMDQ and RSS settings have different requirements.
1294          *  The general PMD call sequence is NIC init, configure,
1295          *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
1296          *  VSI that a specific queue belongs to when VMDQ is applicable, so
1297          *  the VMDQ setting has to be done before rx/tx_queue_setup(). This
1298          *  function is therefore a good place for vmdq_setup.
1299          *  The RSS setting depends on the actual configured RX queue number,
1300          *  which is only available after rx_queue_setup(), so dev_start() is
1301          *  a good place for the RSS setup.
1302          */
1303         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1304                 ret = i40e_vmdq_setup(dev);
1305                 if (ret)
1306                         goto err;
1307         }
1308
1309         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1310                 ret = i40e_dcb_setup(dev);
1311                 if (ret) {
1312                         PMD_DRV_LOG(ERR, "failed to configure DCB.");
1313                         goto err_dcb;
1314                 }
1315         }
1316
1317         return 0;
1318
1319 err_dcb:
1320         /* release VMDQ resources if they exist */
1321         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1322                 i40e_vsi_release(pf->vmdq[i].vsi);
1323                 pf->vmdq[i].vsi = NULL;
1324         }
1325         rte_free(pf->vmdq);
1326         pf->vmdq = NULL;
1327 err:
1328         /* release flow director resources if they exist */
1329         i40e_fdir_teardown(pf);
1330         return ret;
1331 }
1332
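/* Unbind all queues of a VSI from their MSI-X vector: clear each
 * queue's TQCTL/RQCTL registers, then mark the matching interrupt
 * linked list (PF, shared-vector or VF variant) as empty.
 */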
1333 void
1334 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1335 {
1336         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1337         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1338         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1339         uint16_t msix_vect = vsi->msix_intr;
1340         uint16_t i;
1341
1342         for (i = 0; i < vsi->nb_qps; i++) {
1343                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1344                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1345                 rte_wmb();
1346         }
1347
1348         if (vsi->type != I40E_VSI_SRIOV) {
1349                 if (!rte_intr_allow_others(intr_handle)) {
1350                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1351                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1352                         I40E_WRITE_REG(hw,
1353                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1354                                        0);
1355                 } else {
1356                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1357                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1358                         I40E_WRITE_REG(hw,
1359                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1360                                                        msix_vect - 1), 0);
1361                 }
1362         } else {
1363                 uint32_t reg;
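                /* num_msix_vectors_vf includes irq0, so subtract it */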
1364                 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1365                         vsi->user_param + (msix_vect - 1);
1366
1367                 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1368                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1369         }
1370         I40E_WRITE_FLUSH(hw);
1371 }
1372
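/* Chain 'nb_queue' RX queues starting at 'base_queue' onto one MSI-X
 * vector: each queue's RQCTL entry points at the next queue, and the
 * proper LNKLST register is set to the first queue as the list head.
 */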
1373 static void
1374 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1375                        int base_queue, int nb_queue)
1376 {
1377         int i;
1378         uint32_t val;
1379         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1380
1381         /* Bind all RX queues to allocated MSIX interrupt */
1382         for (i = 0; i < nb_queue; i++) {
1383                 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1384                         I40E_QINT_RQCTL_ITR_INDX_MASK |
1385                         ((base_queue + i + 1) <<
1386                          I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1387                         (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1388                         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1389
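                /* the last queue terminates the linked list */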
1390                 if (i == nb_queue - 1)
1391                         val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1392                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1393         }
1394
1395         /* Write first RX queue to Link list register as the head element */
1396         if (vsi->type != I40E_VSI_SRIOV) {
1397                 uint16_t interval =
1398                         i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1399
1400                 if (msix_vect == I40E_MISC_VEC_ID) {
1401                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1402                                        (base_queue <<
1403                                         I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1404                                        (0x0 <<
1405                                         I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1406                         I40E_WRITE_REG(hw,
1407                                        I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1408                                        interval);
1409                 } else {
1410                         I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1411                                        (base_queue <<
1412                                         I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1413                                        (0x0 <<
1414                                         I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1415                         I40E_WRITE_REG(hw,
1416                                        I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1417                                                        msix_vect - 1),
1418                                        interval);
1419                 }
1420         } else {
1421                 uint32_t reg;
1422
1423                 if (msix_vect == I40E_MISC_VEC_ID) {
1424                         I40E_WRITE_REG(hw,
1425                                        I40E_VPINT_LNKLST0(vsi->user_param),
1426                                        (base_queue <<
1427                                         I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1428                                        (0x0 <<
1429                                         I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1430                 } else {
1431                         /* num_msix_vectors_vf includes irq0, so subtract it */
1432                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1433                                 vsi->user_param + (msix_vect - 1);
1434
1435                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1436                                        (base_queue <<
1437                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1438                                        (0x0 <<
1439                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1440                 }
1441         }
1442
1443         I40E_WRITE_FLUSH(hw);
1444 }
1445
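/* Bind all used queues of a VSI to MSI-X vectors. With enough vectors
 * each queue gets its own vector (1:1); otherwise the remaining queues
 * share one vector. For MAIN and VMDQ2 VSIs the mapping is also
 * recorded in intr_handle->intr_vec[] for the Rx interrupt API.
 */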
1446 void
1447 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
1448 {
1449         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1450         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1451         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1452         uint16_t msix_vect = vsi->msix_intr;
1453         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1454         uint16_t queue_idx = 0;
1455         int record = 0;
1456         uint32_t val;
1457         int i;
1458
1459         for (i = 0; i < vsi->nb_qps; i++) {
1460                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1461                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1462         }
1463
1464         /* Disable auto-masking so the INTENA flag is not auto-cleared on interrupt */
1465         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1466         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1467                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1468                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1469         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1470
1471         /* VF bind interrupt */
1472         if (vsi->type == I40E_VSI_SRIOV) {
1473                 __vsi_queues_bind_intr(vsi, msix_vect,
1474                                        vsi->base_queue, vsi->nb_qps);
1475                 return;
1476         }
1477
1478         /* PF & VMDq bind interrupt */
1479         if (rte_intr_dp_is_en(intr_handle)) {
1480                 if (vsi->type == I40E_VSI_MAIN) {
1481                         queue_idx = 0;
1482                         record = 1;
1483                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1484                         struct i40e_vsi *main_vsi =
1485                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1486                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1487                         record = 1;
1488                 }
1489         }
1490
1491         for (i = 0; i < vsi->nb_used_qps; i++) {
1492                 if (nb_msix <= 1) {
1493                         if (!rte_intr_allow_others(intr_handle))
1494                                 /* allow to share MISC_VEC_ID */
1495                                 msix_vect = I40E_MISC_VEC_ID;
1496
1497                         /* not enough msix_vect, map all remaining queues to one */
1498                         __vsi_queues_bind_intr(vsi, msix_vect,
1499                                                vsi->base_queue + i,
1500                                                vsi->nb_used_qps - i);
1501                         for (; !!record && i < vsi->nb_used_qps; i++)
1502                                 intr_handle->intr_vec[queue_idx + i] =
1503                                         msix_vect;
1504                         break;
1505                 }
1506                 /* 1:1 queue/msix_vect mapping */
1507                 __vsi_queues_bind_intr(vsi, msix_vect,
1508                                        vsi->base_queue + i, 1);
1509                 if (!!record)
1510                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1511
1512                 msix_vect++;
1513                 nb_msix--;
1514         }
1515 }
1516
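/* Enable the queue interrupts of a VSI by arming DYN_CTLN per vector,
 * or DYN_CTL0 when sharing the misc vector, with the default ITR.
 */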
1517 static void
1518 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1519 {
1520         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1521         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1522         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1523         uint16_t interval = i40e_calc_itr_interval(
1524                 RTE_LIBRTE_I40E_ITR_INTERVAL);
1525         uint16_t msix_intr, i;
1526
1527         if (rte_intr_allow_others(intr_handle))
1528                 for (i = 0; i < vsi->nb_msix; i++) {
1529                         msix_intr = vsi->msix_intr + i;
1530                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1531                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1532                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1533                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1534                                 (interval <<
1535                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1536                 }
1537         else
1538                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1539                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1540                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1541                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1542                                (interval <<
1543                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1544
1545         I40E_WRITE_FLUSH(hw);
1546 }
1547
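/* Disable the queue interrupts of a VSI by clearing DYN_CTLN per
 * vector, or DYN_CTL0 when sharing the misc vector.
 */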
1548 static void
1549 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1550 {
1551         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1552         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1553         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1554         uint16_t msix_intr, i;
1555
1556         if (rte_intr_allow_others(intr_handle))
1557                 for (i = 0; i < vsi->nb_msix; i++) {
1558                         msix_intr = vsi->msix_intr + i;
1559                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1560                                        0);
1561                 }
1562         else
1563                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1564
1565         I40E_WRITE_FLUSH(hw);
1566 }
1567
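/* Translate an ETH_LINK_SPEED_* bitmap into the I40E_LINK_SPEED_*
 * bitmap used by the admin queue commands.
 */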
1568 static inline uint8_t
1569 i40e_parse_link_speeds(uint16_t link_speeds)
1570 {
1571         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1572
1573         if (link_speeds & ETH_LINK_SPEED_40G)
1574                 link_speed |= I40E_LINK_SPEED_40GB;
1575         if (link_speeds & ETH_LINK_SPEED_25G)
1576                 link_speed |= I40E_LINK_SPEED_25GB;
1577         if (link_speeds & ETH_LINK_SPEED_20G)
1578                 link_speed |= I40E_LINK_SPEED_20GB;
1579         if (link_speeds & ETH_LINK_SPEED_10G)
1580                 link_speed |= I40E_LINK_SPEED_10GB;
1581         if (link_speeds & ETH_LINK_SPEED_1G)
1582                 link_speed |= I40E_LINK_SPEED_1GB;
1583         if (link_speeds & ETH_LINK_SPEED_100M)
1584                 link_speed |= I40E_LINK_SPEED_100MB;
1585
1586         return link_speed;
1587 }
1588
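/* Program the PHY. Pause and low-power bits are kept from the current
 * PHY capabilities; with autoneg enabled the full 'advt' speed set is
 * advertised, otherwise 'force_speed' is used.
 */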
1589 static int
1590 i40e_phy_conf_link(struct i40e_hw *hw,
1591                    uint8_t abilities,
1592                    uint8_t force_speed)
1593 {
1594         enum i40e_status_code status;
1595         struct i40e_aq_get_phy_abilities_resp phy_ab;
1596         struct i40e_aq_set_phy_config phy_conf;
1597         const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1598                         I40E_AQ_PHY_FLAG_PAUSE_RX |
1599                         I40E_AQ_PHY_FLAG_LOW_POWER;
1601         const uint8_t advt = I40E_LINK_SPEED_40GB |
1602                         I40E_LINK_SPEED_25GB |
1603                         I40E_LINK_SPEED_10GB |
1604                         I40E_LINK_SPEED_1GB |
1605                         I40E_LINK_SPEED_100MB;
1606         int ret = -ENOTSUP;
1607
1608
1609         status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1610                                               NULL);
1611         if (status)
1612                 return ret;
1613
1614         memset(&phy_conf, 0, sizeof(phy_conf));
1615
1616         /* bits 0-2 use the values from get_phy_abilities_resp */
1617         abilities &= ~mask;
1618         abilities |= phy_ab.abilities & mask;
1619
1620         /* update abilities and speed */
1621         if (abilities & I40E_AQ_PHY_AN_ENABLED)
1622                 phy_conf.link_speed = advt;
1623         else
1624                 phy_conf.link_speed = force_speed;
1625
1626         phy_conf.abilities = abilities;
1627
1628         /* use get_phy_abilities_resp value for the rest */
1629         phy_conf.phy_type = phy_ab.phy_type;
1630         phy_conf.eee_capability = phy_ab.eee_capability;
1631         phy_conf.eeer = phy_ab.eeer_val;
1632         phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1633
1634         PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1635                     phy_ab.abilities, phy_ab.link_speed);
1636         PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1637                     phy_conf.abilities, phy_conf.link_speed);
1638
1639         status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1640         if (status)
1641                 return ret;
1642
1643         return I40E_SUCCESS;
1644 }
1645
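/* Derive the PHY abilities and speed from dev_conf.link_speeds and
 * program them; 40G links keep autoneg, as FW cannot force their speed.
 */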
1646 static int
1647 i40e_apply_link_speed(struct rte_eth_dev *dev)
1648 {
1649         uint8_t speed;
1650         uint8_t abilities = 0;
1651         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1652         struct rte_eth_conf *conf = &dev->data->dev_conf;
1653
1654         speed = i40e_parse_link_speeds(conf->link_speeds);
1655         if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
1656                 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1657         if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1658                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1659         abilities |= I40E_AQ_PHY_LINK_ENABLED;
1660
1661         /* Skip changing speed on 40G interfaces, as FW does not support it */
1662         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
1663                 speed = I40E_LINK_SPEED_UNKNOWN;
1664                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1665         }
1666
1667         return i40e_phy_conf_link(hw, abilities, speed);
1668 }
1669
1670 static int
1671 i40e_dev_start(struct rte_eth_dev *dev)
1672 {
1673         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1674         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1675         struct i40e_vsi *main_vsi = pf->main_vsi;
1676         int ret, i;
1677         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1678         uint32_t intr_vector = 0;
1679
1680         hw->adapter_stopped = 0;
1681
1682         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1683                 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
1684                              dev->data->port_id);
1685                 return -EINVAL;
1686         }
1687
1688         rte_intr_disable(intr_handle);
1689
1690         if ((rte_intr_cap_multiple(intr_handle) ||
1691              !RTE_ETH_DEV_SRIOV(dev).active) &&
1692             dev->data->dev_conf.intr_conf.rxq != 0) {
1693                 intr_vector = dev->data->nb_rx_queues;
1694                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1695                         return -1;
1696         }
1697
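        /* allocate the queue-to-vector map used by the Rx interrupt API */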
1698         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1699                 intr_handle->intr_vec =
1700                         rte_zmalloc("intr_vec",
1701                                     dev->data->nb_rx_queues * sizeof(int),
1702                                     0);
1703                 if (!intr_handle->intr_vec) {
1704                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1705                                      " intr_vec\n", dev->data->nb_rx_queues);
1706                         return -ENOMEM;
1707                 }
1708         }
1709
1710         /* Initialize VSI */
1711         ret = i40e_dev_rxtx_init(pf);
1712         if (ret != I40E_SUCCESS) {
1713                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1714                 goto err_up;
1715         }
1716
1717         /* Map queues with MSIX interrupt */
1718         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1719                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1720         i40e_vsi_queues_bind_intr(main_vsi);
1721         i40e_vsi_enable_queues_intr(main_vsi);
1722
1723         /* Map VMDQ VSI queues with MSIX interrupt */
1724         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1725                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1726                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
1727                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1728         }
1729
1730         /* enable FDIR MSIX interrupt */
1731         if (pf->fdir.fdir_vsi) {
1732                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
1733                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1734         }
1735
1736         /* Enable all queues which have been configured */
1737         ret = i40e_dev_switch_queues(pf, TRUE);
1738         if (ret != I40E_SUCCESS) {
1739                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
1740                 goto err_up;
1741         }
1742
1743         /* Enable receiving broadcast packets */
1744         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
1745         if (ret != I40E_SUCCESS)
1746                 PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
1747
1748         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1749                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
1750                                                 true, NULL);
1751                 if (ret != I40E_SUCCESS)
1752                         PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
1753         }
1754
1755         /* Apply link configure */
1756         if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
1757                                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1758                                 ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
1759                                 ETH_LINK_SPEED_40G)) {
1760                 PMD_DRV_LOG(ERR, "Invalid link setting");
1761                 goto err_up;
1762         }
1763         ret = i40e_apply_link_speed(dev);
1764         if (I40E_SUCCESS != ret) {
1765                 PMD_DRV_LOG(ERR, "Failed to apply link setting");
1766                 goto err_up;
1767         }
1768
1769         if (!rte_intr_allow_others(intr_handle)) {
1770                 rte_intr_callback_unregister(intr_handle,
1771                                              i40e_dev_interrupt_handler,
1772                                              (void *)dev);
1773                 /* configure and enable device interrupt */
1774                 i40e_pf_config_irq0(hw, FALSE);
1775                 i40e_pf_enable_irq0(hw);
1776
1777                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1778                         PMD_INIT_LOG(INFO, "lsc won't be enabled because"
1779                                      " there is no intr multiplex\n");
1780         }
1781
1782         /* enable uio intr after callback register */
1783         rte_intr_enable(intr_handle);
1784
1785         return I40E_SUCCESS;
1786
1787 err_up:
1788         i40e_dev_switch_queues(pf, FALSE);
1789         i40e_dev_clear_queues(dev);
1790
1791         return ret;
1792 }
1793
1794 static void
1795 i40e_dev_stop(struct rte_eth_dev *dev)
1796 {
1797         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1798         struct i40e_vsi *main_vsi = pf->main_vsi;
1799         struct i40e_mirror_rule *p_mirror;
1800         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1801         int i;
1802
1803         /* Disable all queues */
1804         i40e_dev_switch_queues(pf, FALSE);
1805
1806         /* un-map queues with interrupt registers */
1807         i40e_vsi_disable_queues_intr(main_vsi);
1808         i40e_vsi_queues_unbind_intr(main_vsi);
1809
1810         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1811                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
1812                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
1813         }
1814
1815         if (pf->fdir.fdir_vsi) {
1816                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
1817                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
1818         }
1819         /* Clear all queues and release memory */
1820         i40e_dev_clear_queues(dev);
1821
1822         /* Set link down */
1823         i40e_dev_set_link_down(dev);
1824
1825         /* Remove all mirror rules */
1826         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
1827                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
1828                 rte_free(p_mirror);
1829         }
1830         pf->nb_mirror_rule = 0;
1831
1832         if (!rte_intr_allow_others(intr_handle))
1833                 /* revert to the default interrupt handler */
1834                 rte_intr_callback_register(intr_handle,
1835                                            i40e_dev_interrupt_handler,
1836                                            (void *)dev);
1837
1838         /* Clean datapath event and queue/vec mapping */
1839         rte_intr_efd_disable(intr_handle);
1840         if (intr_handle->intr_vec) {
1841                 rte_free(intr_handle->intr_vec);
1842                 intr_handle->intr_vec = NULL;
1843         }
1844 }
1845
1846 static void
1847 i40e_dev_close(struct rte_eth_dev *dev)
1848 {
1849         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1850         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1851         uint32_t reg;
1852         int i;
1853
1854         PMD_INIT_FUNC_TRACE();
1855
1856         i40e_dev_stop(dev);
1857         hw->adapter_stopped = 1;
1858         i40e_dev_free_queues(dev);
1859
1860         /* Disable interrupt */
1861         i40e_pf_disable_irq0(hw);
1862         rte_intr_disable(&(dev->pci_dev->intr_handle));
1863
1864         /* shutdown and destroy the HMC */
1865         i40e_shutdown_lan_hmc(hw);
1866
1867         /* release all the existing VSIs and VEBs */
1868         i40e_fdir_teardown(pf);
1869         i40e_vsi_release(pf->main_vsi);
1870
1871         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1872                 i40e_vsi_release(pf->vmdq[i].vsi);
1873                 pf->vmdq[i].vsi = NULL;
1874         }
1875
1876         rte_free(pf->vmdq);
1877         pf->vmdq = NULL;
1878
1879         /* shutdown the adminq */
1880         i40e_aq_queue_shutdown(hw, true);
1881         i40e_shutdown_adminq(hw);
1882
1883         i40e_res_pool_destroy(&pf->qp_pool);
1884         i40e_res_pool_destroy(&pf->msix_pool);
1885
1886         /* force a PF reset to clean anything leftover */
1887         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
1888         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
1889                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1890         I40E_WRITE_FLUSH(hw);
1891 }
1892
1893 static void
1894 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
1895 {
1896         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1897         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1898         struct i40e_vsi *vsi = pf->main_vsi;
1899         int status;
1900
1901         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1902                                                      true, NULL, true);
1903         if (status != I40E_SUCCESS)
1904                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
1905
1906         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1907                                                         TRUE, NULL);
1908         if (status != I40E_SUCCESS)
1909                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1911 }
1912
1913 static void
1914 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
1915 {
1916         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1917         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1918         struct i40e_vsi *vsi = pf->main_vsi;
1919         int status;
1920
1921         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1922                                                      false, NULL, true);
1923         if (status != I40E_SUCCESS)
1924                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
1925
1926         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1927                                                         false, NULL);
1928         if (status != I40E_SUCCESS)
1929                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1930 }
1931
1932 static void
1933 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
1934 {
1935         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1936         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1937         struct i40e_vsi *vsi = pf->main_vsi;
1938         int ret;
1939
1940         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
1941         if (ret != I40E_SUCCESS)
1942                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1943 }
1944
1945 static void
1946 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
1947 {
1948         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1949         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1950         struct i40e_vsi *vsi = pf->main_vsi;
1951         int ret;
1952
1953         if (dev->data->promiscuous == 1)
1954                 return; /* must remain in all_multicast mode */
1955
1956         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
1957                                 vsi->seid, FALSE, NULL);
1958         if (ret != I40E_SUCCESS)
1959                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1960 }
1961
1962 /*
1963  * Set device link up.
1964  */
1965 static int
1966 i40e_dev_set_link_up(struct rte_eth_dev *dev)
1967 {
1968         /* re-apply link speed setting */
1969         return i40e_apply_link_speed(dev);
1970 }
1971
1972 /*
1973  * Set device link down.
1974  */
1975 static int
1976 i40e_dev_set_link_down(struct rte_eth_dev *dev)
1977 {
1978         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
1979         uint8_t abilities = 0;
1980         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1981
1982         if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
1983                 abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1984         return i40e_phy_conf_link(hw, abilities, speed);
1985 }
1986
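/* Query link status from the admin queue. With wait_to_complete set,
 * poll up to MAX_REPEAT_TIME * CHECK_INTERVAL ms for link-up. Returns
 * 0 when the status changed, -1 when it is unchanged.
 */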
1987 int
1988 i40e_dev_link_update(struct rte_eth_dev *dev,
1989                      int wait_to_complete)
1990 {
1991 #define CHECK_INTERVAL 100  /* 100ms */
1992 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
1993         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1994         struct i40e_link_status link_status;
1995         struct rte_eth_link link, old;
1996         int status;
1997         unsigned rep_cnt = MAX_REPEAT_TIME;
1998
1999         memset(&link, 0, sizeof(link));
2000         memset(&old, 0, sizeof(old));
2001         memset(&link_status, 0, sizeof(link_status));
2002         rte_i40e_dev_atomic_read_link_status(dev, &old);
2003
2004         do {
2005                 /* Get link status information from hardware */
2006                 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
2007                 if (status != I40E_SUCCESS) {
2008                         link.link_speed = ETH_SPEED_NUM_100M;
2009                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2010                         PMD_DRV_LOG(ERR, "Failed to get link info");
2011                         goto out;
2012                 }
2013
2014                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2015                 if (!wait_to_complete)
2016                         break;
2017
2018                 rte_delay_ms(CHECK_INTERVAL);
2019         } while (!link.link_status && rep_cnt--);
2020
2021         if (!link.link_status)
2022                 goto out;
2023
2024         /* i40e uses full duplex only */
2025         link.link_duplex = ETH_LINK_FULL_DUPLEX;
2026
2027         /* Parse the link status */
2028         switch (link_status.link_speed) {
2029         case I40E_LINK_SPEED_100MB:
2030                 link.link_speed = ETH_SPEED_NUM_100M;
2031                 break;
2032         case I40E_LINK_SPEED_1GB:
2033                 link.link_speed = ETH_SPEED_NUM_1G;
2034                 break;
2035         case I40E_LINK_SPEED_10GB:
2036                 link.link_speed = ETH_SPEED_NUM_10G;
2037                 break;
2038         case I40E_LINK_SPEED_20GB:
2039                 link.link_speed = ETH_SPEED_NUM_20G;
2040                 break;
2041         case I40E_LINK_SPEED_25GB:
2042                 link.link_speed = ETH_SPEED_NUM_25G;
2043                 break;
2044         case I40E_LINK_SPEED_40GB:
2045                 link.link_speed = ETH_SPEED_NUM_40G;
2046                 break;
2047         default:
2048                 link.link_speed = ETH_SPEED_NUM_100M;
2049                 break;
2050         }
2051
2052         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2053                         ETH_LINK_SPEED_FIXED);
2054
2055 out:
2056         rte_i40e_dev_atomic_write_link_status(dev, &link);
2057         if (link.link_status == old.link_status)
2058                 return -1;
2059
2060         return 0;
2061 }
2062
2063 /* Get all the statistics of a VSI */
2064 void
2065 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2066 {
2067         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2068         struct i40e_eth_stats *nes = &vsi->eth_stats;
2069         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2070         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2071
2072         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2073                             vsi->offset_loaded, &oes->rx_bytes,
2074                             &nes->rx_bytes);
2075         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2076                             vsi->offset_loaded, &oes->rx_unicast,
2077                             &nes->rx_unicast);
2078         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2079                             vsi->offset_loaded, &oes->rx_multicast,
2080                             &nes->rx_multicast);
2081         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2082                             vsi->offset_loaded, &oes->rx_broadcast,
2083                             &nes->rx_broadcast);
2084         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2085                             &oes->rx_discards, &nes->rx_discards);
2086         /* GLV_REPC not supported */
2087         /* GLV_RMPC not supported */
2088         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2089                             &oes->rx_unknown_protocol,
2090                             &nes->rx_unknown_protocol);
2091         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2092                             vsi->offset_loaded, &oes->tx_bytes,
2093                             &nes->tx_bytes);
2094         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2095                             vsi->offset_loaded, &oes->tx_unicast,
2096                             &nes->tx_unicast);
2097         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2098                             vsi->offset_loaded, &oes->tx_multicast,
2099                             &nes->tx_multicast);
2100         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2101                             vsi->offset_loaded,  &oes->tx_broadcast,
2102                             &nes->tx_broadcast);
2103         /* GLV_TDPC not supported */
2104         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2105                             &oes->tx_errors, &nes->tx_errors);
2106         vsi->offset_loaded = true;
2107
2108         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2109                     vsi->vsi_id);
2110         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2111         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2112         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2113         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2114         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2115         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2116                     nes->rx_unknown_protocol);
2117         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2118         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2119         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2120         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2121         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2122         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2123         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2124                     vsi->vsi_id);
2125 }
2126
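/* Read all port statistic registers and accumulate the deltas into
 * pf->stats, using pf->stats_offset as the baseline captured when the
 * stats were last reset.
 */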
2127 static void
2128 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2129 {
2130         unsigned int i;
2131         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2132         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2133
2134         /* Get statistics of struct i40e_eth_stats */
2135         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2136                             I40E_GLPRT_GORCL(hw->port),
2137                             pf->offset_loaded, &os->eth.rx_bytes,
2138                             &ns->eth.rx_bytes);
2139         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2140                             I40E_GLPRT_UPRCL(hw->port),
2141                             pf->offset_loaded, &os->eth.rx_unicast,
2142                             &ns->eth.rx_unicast);
2143         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2144                             I40E_GLPRT_MPRCL(hw->port),
2145                             pf->offset_loaded, &os->eth.rx_multicast,
2146                             &ns->eth.rx_multicast);
2147         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2148                             I40E_GLPRT_BPRCL(hw->port),
2149                             pf->offset_loaded, &os->eth.rx_broadcast,
2150                             &ns->eth.rx_broadcast);
2151         /* Workaround: CRC size should not be included in byte statistics,
2152          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2153          */
2154         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2155                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2156
2157         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2158                             pf->offset_loaded, &os->eth.rx_discards,
2159                             &ns->eth.rx_discards);
2160         /* GLPRT_REPC not supported */
2161         /* GLPRT_RMPC not supported */
2162         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2163                             pf->offset_loaded,
2164                             &os->eth.rx_unknown_protocol,
2165                             &ns->eth.rx_unknown_protocol);
2166         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2167                             I40E_GLPRT_GOTCL(hw->port),
2168                             pf->offset_loaded, &os->eth.tx_bytes,
2169                             &ns->eth.tx_bytes);
2170         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2171                             I40E_GLPRT_UPTCL(hw->port),
2172                             pf->offset_loaded, &os->eth.tx_unicast,
2173                             &ns->eth.tx_unicast);
2174         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2175                             I40E_GLPRT_MPTCL(hw->port),
2176                             pf->offset_loaded, &os->eth.tx_multicast,
2177                             &ns->eth.tx_multicast);
2178         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2179                             I40E_GLPRT_BPTCL(hw->port),
2180                             pf->offset_loaded, &os->eth.tx_broadcast,
2181                             &ns->eth.tx_broadcast);
2182         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2183                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2184         /* GLPRT_TEPC not supported */
2185
2186         /* additional port specific stats */
2187         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2188                             pf->offset_loaded, &os->tx_dropped_link_down,
2189                             &ns->tx_dropped_link_down);
2190         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2191                             pf->offset_loaded, &os->crc_errors,
2192                             &ns->crc_errors);
2193         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2194                             pf->offset_loaded, &os->illegal_bytes,
2195                             &ns->illegal_bytes);
2196         /* GLPRT_ERRBC not supported */
2197         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2198                             pf->offset_loaded, &os->mac_local_faults,
2199                             &ns->mac_local_faults);
2200         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2201                             pf->offset_loaded, &os->mac_remote_faults,
2202                             &ns->mac_remote_faults);
2203         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2204                             pf->offset_loaded, &os->rx_length_errors,
2205                             &ns->rx_length_errors);
2206         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2207                             pf->offset_loaded, &os->link_xon_rx,
2208                             &ns->link_xon_rx);
2209         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2210                             pf->offset_loaded, &os->link_xoff_rx,
2211                             &ns->link_xoff_rx);
2212         for (i = 0; i < 8; i++) {
2213                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2214                                     pf->offset_loaded,
2215                                     &os->priority_xon_rx[i],
2216                                     &ns->priority_xon_rx[i]);
2217                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2218                                     pf->offset_loaded,
2219                                     &os->priority_xoff_rx[i],
2220                                     &ns->priority_xoff_rx[i]);
2221         }
2222         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2223                             pf->offset_loaded, &os->link_xon_tx,
2224                             &ns->link_xon_tx);
2225         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2226                             pf->offset_loaded, &os->link_xoff_tx,
2227                             &ns->link_xoff_tx);
2228         for (i = 0; i < 8; i++) {
2229                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2230                                     pf->offset_loaded,
2231                                     &os->priority_xon_tx[i],
2232                                     &ns->priority_xon_tx[i]);
2233                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2234                                     pf->offset_loaded,
2235                                     &os->priority_xoff_tx[i],
2236                                     &ns->priority_xoff_tx[i]);
2237                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2238                                     pf->offset_loaded,
2239                                     &os->priority_xon_2_xoff[i],
2240                                     &ns->priority_xon_2_xoff[i]);
2241         }
2242         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2243                             I40E_GLPRT_PRC64L(hw->port),
2244                             pf->offset_loaded, &os->rx_size_64,
2245                             &ns->rx_size_64);
2246         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2247                             I40E_GLPRT_PRC127L(hw->port),
2248                             pf->offset_loaded, &os->rx_size_127,
2249                             &ns->rx_size_127);
2250         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2251                             I40E_GLPRT_PRC255L(hw->port),
2252                             pf->offset_loaded, &os->rx_size_255,
2253                             &ns->rx_size_255);
2254         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2255                             I40E_GLPRT_PRC511L(hw->port),
2256                             pf->offset_loaded, &os->rx_size_511,
2257                             &ns->rx_size_511);
2258         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2259                             I40E_GLPRT_PRC1023L(hw->port),
2260                             pf->offset_loaded, &os->rx_size_1023,
2261                             &ns->rx_size_1023);
2262         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2263                             I40E_GLPRT_PRC1522L(hw->port),
2264                             pf->offset_loaded, &os->rx_size_1522,
2265                             &ns->rx_size_1522);
2266         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2267                             I40E_GLPRT_PRC9522L(hw->port),
2268                             pf->offset_loaded, &os->rx_size_big,
2269                             &ns->rx_size_big);
2270         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2271                             pf->offset_loaded, &os->rx_undersize,
2272                             &ns->rx_undersize);
2273         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2274                             pf->offset_loaded, &os->rx_fragments,
2275                             &ns->rx_fragments);
2276         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2277                             pf->offset_loaded, &os->rx_oversize,
2278                             &ns->rx_oversize);
2279         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2280                             pf->offset_loaded, &os->rx_jabber,
2281                             &ns->rx_jabber);
2282         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2283                             I40E_GLPRT_PTC64L(hw->port),
2284                             pf->offset_loaded, &os->tx_size_64,
2285                             &ns->tx_size_64);
2286         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2287                             I40E_GLPRT_PTC127L(hw->port),
2288                             pf->offset_loaded, &os->tx_size_127,
2289                             &ns->tx_size_127);
2290         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2291                             I40E_GLPRT_PTC255L(hw->port),
2292                             pf->offset_loaded, &os->tx_size_255,
2293                             &ns->tx_size_255);
2294         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2295                             I40E_GLPRT_PTC511L(hw->port),
2296                             pf->offset_loaded, &os->tx_size_511,
2297                             &ns->tx_size_511);
2298         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2299                             I40E_GLPRT_PTC1023L(hw->port),
2300                             pf->offset_loaded, &os->tx_size_1023,
2301                             &ns->tx_size_1023);
2302         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2303                             I40E_GLPRT_PTC1522L(hw->port),
2304                             pf->offset_loaded, &os->tx_size_1522,
2305                             &ns->tx_size_1522);
2306         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2307                             I40E_GLPRT_PTC9522L(hw->port),
2308                             pf->offset_loaded, &os->tx_size_big,
2309                             &ns->tx_size_big);
2310         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2311                            pf->offset_loaded,
2312                            &os->fd_sb_match, &ns->fd_sb_match);
2313         /* GLPRT_MSPDC not supported */
2314         /* GLPRT_XEC not supported */
2315
2316         pf->offset_loaded = true;
2317
2318         if (pf->main_vsi)
2319                 i40e_update_vsi_stats(pf->main_vsi);
2320 }
2321
2322 /* Get all statistics of a port */
2323 static void
2324 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2325 {
2326         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2327         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2328         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2329         unsigned i;
2330
2331         /* read the registers to update the values, then copy them into the struct */
2332         i40e_read_stats_registers(pf, hw);
2333
2334         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
2335                         pf->main_vsi->eth_stats.rx_multicast +
2336                         pf->main_vsi->eth_stats.rx_broadcast -
2337                         pf->main_vsi->eth_stats.rx_discards;
2338         stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
2339                         pf->main_vsi->eth_stats.tx_multicast +
2340                         pf->main_vsi->eth_stats.tx_broadcast;
2341         stats->ibytes   = ns->eth.rx_bytes;
2342         stats->obytes   = ns->eth.tx_bytes;
2343         stats->oerrors  = ns->eth.tx_errors +
2344                         pf->main_vsi->eth_stats.tx_errors;
2345
2346         /* Rx Errors */
2347         stats->imissed  = ns->eth.rx_discards +
2348                         pf->main_vsi->eth_stats.rx_discards;
2349         stats->ierrors  = ns->crc_errors +
2350                         ns->rx_length_errors + ns->rx_undersize +
2351                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2352
2353         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2354         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2355         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2356         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2357         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2358         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2359         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2360                     ns->eth.rx_unknown_protocol);
2361         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2362         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2363         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2364         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2365         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2366         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2367
2368         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2369                     ns->tx_dropped_link_down);
2370         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2371         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2372                     ns->illegal_bytes);
2373         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2374         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2375                     ns->mac_local_faults);
2376         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2377                     ns->mac_remote_faults);
2378         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2379                     ns->rx_length_errors);
2380         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2381         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2382         for (i = 0; i < 8; i++) {
2383                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2384                                 i, ns->priority_xon_rx[i]);
2385                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2386                                 i, ns->priority_xoff_rx[i]);
2387         }
2388         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2389         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2390         for (i = 0; i < 8; i++) {
2391                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2392                                 i, ns->priority_xon_tx[i]);
2393                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2394                                 i, ns->priority_xoff_tx[i]);
2395                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2396                                 i, ns->priority_xon_2_xoff[i]);
2397         }
2398         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2399         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2400         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2401         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2402         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2403         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2404         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2405         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2406         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2407         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2408         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2409         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2410         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2411         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2412         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2413         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2414         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2415         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2416         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2417                         ns->mac_short_packet_dropped);
2418         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2419                     ns->checksum_error);
2420         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2421         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2422 }
2423
2424 /* Reset the statistics */
2425 static void
2426 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2427 {
2428         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2429         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2430
2431         /* Mark PF and VSI stats to update the offset, aka "reset" */
2432         pf->offset_loaded = false;
2433         if (pf->main_vsi)
2434                 pf->main_vsi->offset_loaded = false;
2435
2436         /* Re-read the stats; the current register values become the new offsets */
2437         i40e_read_stats_registers(pf, hw);
2438 }
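
/*
 * Illustrative sketch, not part of this driver: the "reset" above never
 * clears the hardware counters. Reported values are always the running
 * counter minus a saved offset, so clearing offset_loaded and re-reading
 * the registers re-snapshots the offsets and every stat restarts at zero.
 * A minimal standalone model of that technique (uint64_t comes from
 * <stdint.h>, already included above):
 */
struct demo_ctr {
        uint64_t offset;        /* register value captured at last "reset" */
        int offset_loaded;      /* 0 forces the next read to re-snapshot */
};

static uint64_t
demo_ctr_read(struct demo_ctr *c, uint64_t hw_reg_value)
{
        if (!c->offset_loaded) {
                c->offset = hw_reg_value;       /* aka "reset" */
                c->offset_loaded = 1;
        }
        return hw_reg_value - c->offset;        /* delta since last reset */
}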
2439
2440 static uint32_t
2441 i40e_xstats_calc_num(void)
2442 {
2443         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2444                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2445                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2446 }
2447
2448 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2449                                      struct rte_eth_xstat_name *xstats_names,
2450                                      __rte_unused unsigned limit)
2451 {
2452         unsigned count = 0;
2453         unsigned i, prio;
2454
2455         if (xstats_names == NULL)
2456                 return i40e_xstats_calc_num();
2457
2458         /* Note: limit checked in rte_eth_xstats_get_names() */
2459
2460         /* Get stats from i40e_eth_stats struct */
2461         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2462                 snprintf(xstats_names[count].name,
2463                          sizeof(xstats_names[count].name),
2464                          "%s", rte_i40e_stats_strings[i].name);
2465                 count++;
2466         }
2467
2468         /* Get individual stats from i40e_hw_port struct */
2469         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2470                 snprintf(xstats_names[count].name,
2471                         sizeof(xstats_names[count].name),
2472                          "%s", rte_i40e_hw_port_strings[i].name);
2473                 count++;
2474         }
2475
2476         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2477                 for (prio = 0; prio < 8; prio++) {
2478                         snprintf(xstats_names[count].name,
2479                                  sizeof(xstats_names[count].name),
2480                                  "rx_priority%u_%s", prio,
2481                                  rte_i40e_rxq_prio_strings[i].name);
2482                         count++;
2483                 }
2484         }
2485
2486         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2487                 for (prio = 0; prio < 8; prio++) {
2488                         snprintf(xstats_names[count].name,
2489                                  sizeof(xstats_names[count].name),
2490                                  "tx_priority%u_%s", prio,
2491                                  rte_i40e_txq_prio_strings[i].name);
2492                         count++;
2493                 }
2494         }
2495         return count;
2496 }
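
/*
 * Illustrative sketch, not part of this driver: applications reach the
 * handler above through the usual two-step ethdev pattern -- query the
 * count with a NULL array first, then allocate and fetch. This assumes
 * the rte_eth_xstats_get_names() API of this DPDK generation; error
 * handling is abbreviated.
 */
static void
demo_dump_xstat_names(uint8_t port_id)
{
        struct rte_eth_xstat_name *names;
        int i, n;

        n = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (n <= 0)
                return;

        names = rte_zmalloc("demo_xstat_names", sizeof(*names) * n, 0);
        if (names == NULL)
                return;

        if (rte_eth_xstats_get_names(port_id, names, n) == n)
                for (i = 0; i < n; i++)
                        printf("%s\n", names[i].name);
        rte_free(names);
}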
2497
2498 static int
2499 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2500                     unsigned n)
2501 {
2502         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2503         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2504         unsigned i, count, prio;
2505         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2506
2507         count = i40e_xstats_calc_num();
2508         if (n < count)
2509                 return count;
2510
2511         i40e_read_stats_registers(pf, hw);
2512
2513         if (xstats == NULL)
2514                 return 0;
2515
2516         count = 0;
2517
2518         /* Get stats from i40e_eth_stats struct */
2519         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2520                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2521                         rte_i40e_stats_strings[i].offset);
2522                 count++;
2523         }
2524
2525         /* Get individual stats from i40e_hw_port struct */
2526         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2527                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2528                         rte_i40e_hw_port_strings[i].offset);
2529                 count++;
2530         }
2531
2532         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2533                 for (prio = 0; prio < 8; prio++) {
2534                         xstats[count].value =
2535                                 *(uint64_t *)(((char *)hw_stats) +
2536                                 rte_i40e_rxq_prio_strings[i].offset +
2537                                 (sizeof(uint64_t) * prio));
2538                         count++;
2539                 }
2540         }
2541
2542         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2543                 for (prio = 0; prio < 8; prio++) {
2544                         xstats[count].value =
2545                                 *(uint64_t *)(((char *)hw_stats) +
2546                                 rte_i40e_txq_prio_strings[i].offset +
2547                                 (sizeof(uint64_t) * prio));
2548                         count++;
2549                 }
2550         }
2551
2552         return count;
2553 }
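
/*
 * Illustrative sketch, not part of this driver: the loops above are
 * table-driven -- each rte_i40e_*_strings[] entry pairs a stat name with
 * its byte offset inside the stats struct, so one generic loop can pull
 * out every counter. The same pattern in miniature, with made-up names
 * (offsetof() comes from <stddef.h>):
 */
#include <stddef.h>

struct demo_stats {
        uint64_t rx_bytes;
        uint64_t tx_bytes;
};

struct demo_xstat_entry {
        const char *name;
        unsigned offset;
};

static const struct demo_xstat_entry demo_strings[] = {
        {"rx_good_bytes", offsetof(struct demo_stats, rx_bytes)},
        {"tx_good_bytes", offsetof(struct demo_stats, tx_bytes)},
};

static uint64_t
demo_xstat_value(const struct demo_stats *stats, unsigned i)
{
        /* Same pointer arithmetic as above: struct base + recorded offset */
        return *(const uint64_t *)(((const char *)stats) +
                                   demo_strings[i].offset);
}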
2554
2555 static int
2556 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
2557                                  __rte_unused uint16_t queue_id,
2558                                  __rte_unused uint8_t stat_idx,
2559                                  __rte_unused uint8_t is_rx)
2560 {
2561         PMD_INIT_FUNC_TRACE();
2562
2563         return -ENOSYS;
2564 }
2565
2566 static void
2567 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2568 {
2569         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2570         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2571         struct i40e_vsi *vsi = pf->main_vsi;
2572
2573         dev_info->max_rx_queues = vsi->nb_qps;
2574         dev_info->max_tx_queues = vsi->nb_qps;
2575         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2576         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2577         dev_info->max_mac_addrs = vsi->max_macaddrs;
2578         dev_info->max_vfs = dev->pci_dev->max_vfs;
2579         dev_info->rx_offload_capa =
2580                 DEV_RX_OFFLOAD_VLAN_STRIP |
2581                 DEV_RX_OFFLOAD_QINQ_STRIP |
2582                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2583                 DEV_RX_OFFLOAD_UDP_CKSUM |
2584                 DEV_RX_OFFLOAD_TCP_CKSUM;
2585         dev_info->tx_offload_capa =
2586                 DEV_TX_OFFLOAD_VLAN_INSERT |
2587                 DEV_TX_OFFLOAD_QINQ_INSERT |
2588                 DEV_TX_OFFLOAD_IPV4_CKSUM |
2589                 DEV_TX_OFFLOAD_UDP_CKSUM |
2590                 DEV_TX_OFFLOAD_TCP_CKSUM |
2591                 DEV_TX_OFFLOAD_SCTP_CKSUM |
2592                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2593                 DEV_TX_OFFLOAD_TCP_TSO |
2594                 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
2595                 DEV_TX_OFFLOAD_GRE_TNL_TSO |
2596                 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
2597                 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
2598         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
2599                                                 sizeof(uint32_t);
2600         dev_info->reta_size = pf->hash_lut_size;
2601         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2602
2603         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2604                 .rx_thresh = {
2605                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
2606                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
2607                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
2608                 },
2609                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2610                 .rx_drop_en = 0,
2611         };
2612
2613         dev_info->default_txconf = (struct rte_eth_txconf) {
2614                 .tx_thresh = {
2615                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
2616                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
2617                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
2618                 },
2619                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2620                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2621                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2622                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2623         };
2624
2625         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2626                 .nb_max = I40E_MAX_RING_DESC,
2627                 .nb_min = I40E_MIN_RING_DESC,
2628                 .nb_align = I40E_ALIGN_RING_DESC,
2629         };
2630
2631         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2632                 .nb_max = I40E_MAX_RING_DESC,
2633                 .nb_min = I40E_MIN_RING_DESC,
2634                 .nb_align = I40E_ALIGN_RING_DESC,
2635         };
2636
2637         if (pf->flags & I40E_FLAG_VMDQ) {
2638                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
2639                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
2640                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
2641                                                 pf->max_nb_vmdq_vsi;
2642                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
2643                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
2644                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
2645         }
2646
2647         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
2648                 /* For XL710 */
2649                 dev_info->speed_capa = ETH_LINK_SPEED_40G;
2650         else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
2651                 /* For XXV710 */
2652                 dev_info->speed_capa = ETH_LINK_SPEED_25G;
2653         else
2654                 /* For X710 */
2655                 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2656 }
2657
2658 static int
2659 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2660 {
2661         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2662         struct i40e_vsi *vsi = pf->main_vsi;
2663         PMD_INIT_FUNC_TRACE();
2664
2665         if (on)
2666                 return i40e_vsi_add_vlan(vsi, vlan_id);
2667         else
2668                 return i40e_vsi_delete_vlan(vsi, vlan_id);
2669 }
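
/*
 * Illustrative sketch, not part of this driver: this callback is invoked
 * through the generic ethdev entry point. Admitting VLAN 100 on a port
 * (assuming hw_vlan_filter was enabled in the port's rxmode) looks like:
 */
static int
demo_allow_vlan_100(uint8_t port_id)
{
        /* on = 1 adds the filter to the main VSI, on = 0 removes it */
        return rte_eth_dev_vlan_filter(port_id, 100, 1);
}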
2670
2671 static int
2672 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
2673                    enum rte_vlan_type vlan_type,
2674                    uint16_t tpid)
2675 {
2676         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2677         uint64_t reg_r = 0, reg_w = 0;
2678         uint16_t reg_id = 0;
2679         int ret = 0;
2680         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
2681
2682         switch (vlan_type) {
2683         case ETH_VLAN_TYPE_OUTER:
2684                 if (qinq)
2685                         reg_id = 2;
2686                 else
2687                         reg_id = 3;
2688                 break;
2689         case ETH_VLAN_TYPE_INNER:
2690                 if (qinq)
2691                         reg_id = 3;
2692                 else {
2693                         ret = -EINVAL;
2694                         PMD_DRV_LOG(ERR,
2695                                 "Unsupported VLAN type in single VLAN mode");
2696                         return ret;
2697                 }
2698                 break;
2699         default:
2700                 ret = -EINVAL;
2701                 PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
2702                 return ret;
2703         }
2704         ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2705                                           &reg_r, NULL);
2706         if (ret != I40E_SUCCESS) {
2707                 PMD_DRV_LOG(ERR, "Failed to debug read from "
2708                             "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
2709                 ret = -EIO;
2710                 return ret;
2711         }
2712         PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
2713                     "0x%08"PRIx64"", reg_id, reg_r);
2714
2715         reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
2716         reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
2717         if (reg_r == reg_w) {
2718                 ret = 0;
2719                 PMD_DRV_LOG(DEBUG, "No need to write");
2720                 return ret;
2721         }
2722
2723         ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2724                                            reg_w, NULL);
2725         if (ret != I40E_SUCCESS) {
2726                 ret = -EIO;
2727                 PMD_DRV_LOG(ERR, "Failed to debug write to "
2728                             "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
2729                 return ret;
2730         }
2731         PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
2732                     "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
2733
2734         return ret;
2735 }
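
/*
 * Illustrative sketch, not part of this driver: the function above is a
 * read-modify-write of a single register field -- clear the field with its
 * mask, OR in the shifted new value, and skip the slow write when nothing
 * changed. The same pattern in isolation, with made-up mask/shift values:
 */
#define DEMO_ETHERTYPE_SHIFT 32
#define DEMO_ETHERTYPE_MASK  (0xFFFFULL << DEMO_ETHERTYPE_SHIFT)

static int
demo_update_tpid_field(uint64_t *reg, uint16_t tpid)
{
        uint64_t old = *reg;
        uint64_t val = (old & ~DEMO_ETHERTYPE_MASK) |
                       ((uint64_t)tpid << DEMO_ETHERTYPE_SHIFT);

        if (val == old)
                return 0;       /* no need to write */
        *reg = val;             /* the driver does this via the adminq */
        return 1;
}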
2736
2737 static void
2738 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2739 {
2740         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2741         struct i40e_vsi *vsi = pf->main_vsi;
2742
2743         if (mask & ETH_VLAN_FILTER_MASK) {
2744                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2745                         i40e_vsi_config_vlan_filter(vsi, TRUE);
2746                 else
2747                         i40e_vsi_config_vlan_filter(vsi, FALSE);
2748         }
2749
2750         if (mask & ETH_VLAN_STRIP_MASK) {
2751                 /* Enable or disable VLAN stripping */
2752                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2753                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
2754                 else
2755                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
2756         }
2757
2758         if (mask & ETH_VLAN_EXTEND_MASK) {
2759                 if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
2760                         i40e_vsi_config_double_vlan(vsi, TRUE);
2761                         /* Set global registers with default ether type value */
2762                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
2763                                            ETHER_TYPE_VLAN);
2764                         i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
2765                                            ETHER_TYPE_VLAN);
2766                 } else {
2767                         i40e_vsi_config_double_vlan(vsi, FALSE);
2768                 }
2769         }
2770 }
2771
2772 static void
2773 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
2774                           __rte_unused uint16_t queue,
2775                           __rte_unused int on)
2776 {
2777         PMD_INIT_FUNC_TRACE();
2778 }
2779
2780 static int
2781 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2782 {
2783         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2784         struct i40e_vsi *vsi = pf->main_vsi;
2785         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
2786         struct i40e_vsi_vlan_pvid_info info;
2787
2788         memset(&info, 0, sizeof(info));
2789         info.on = on;
2790         if (info.on)
2791                 info.config.pvid = pvid;
2792         else {
2793                 info.config.reject.tagged =
2794                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
2795                 info.config.reject.untagged =
2796                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
2797         }
2798
2799         return i40e_vsi_vlan_pvid_set(vsi, &info);
2800 }
2801
2802 static int
2803 i40e_dev_led_on(struct rte_eth_dev *dev)
2804 {
2805         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2806         uint32_t mode = i40e_led_get(hw);
2807
2808         if (mode == 0)
2809                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
2810
2811         return 0;
2812 }
2813
2814 static int
2815 i40e_dev_led_off(struct rte_eth_dev *dev)
2816 {
2817         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2818         uint32_t mode = i40e_led_get(hw);
2819
2820         if (mode != 0)
2821                 i40e_led_set(hw, 0, false);
2822
2823         return 0;
2824 }
2825
2826 static int
2827 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2828 {
2829         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2830         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2831
2832         fc_conf->pause_time = pf->fc_conf.pause_time;
2833         fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
2834         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
2835
2836         /* Return the current mode according to the actual setting */
2837         switch (hw->fc.current_mode) {
2838         case I40E_FC_FULL:
2839                 fc_conf->mode = RTE_FC_FULL;
2840                 break;
2841         case I40E_FC_TX_PAUSE:
2842                 fc_conf->mode = RTE_FC_TX_PAUSE;
2843                 break;
2844         case I40E_FC_RX_PAUSE:
2845                 fc_conf->mode = RTE_FC_RX_PAUSE;
2846                 break;
2847         case I40E_FC_NONE:
2848         default:
2849                 fc_conf->mode = RTE_FC_NONE;
2850         }
2851
2852         return 0;
2853 }
2854
2855 static int
2856 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2857 {
2858         uint32_t mflcn_reg, fctrl_reg, reg;
2859         uint32_t max_high_water;
2860         uint8_t i, aq_failure;
2861         int err;
2862         struct i40e_hw *hw;
2863         struct i40e_pf *pf;
2864         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
2865                 [RTE_FC_NONE] = I40E_FC_NONE,
2866                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
2867                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
2868                 [RTE_FC_FULL] = I40E_FC_FULL
2869         };
2870
2871         /* The high_water field in rte_eth_fc_conf is in kilobyte units */
2872
2873         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
2874         if ((fc_conf->high_water > max_high_water) ||
2875                         (fc_conf->high_water < fc_conf->low_water)) {
2876                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB; "
2877                         "high_water must be <= %d.", max_high_water);
2878                 return -EINVAL;
2879         }
2880
2881         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2882         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2883         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
2884
2885         pf->fc_conf.pause_time = fc_conf->pause_time;
2886         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
2887         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
2888
2889         PMD_INIT_FUNC_TRACE();
2890
2891         /* All the link flow control related enable/disable register
2892          * configuration is handled by the F/W
2893          */
2894         err = i40e_set_fc(hw, &aq_failure, true);
2895         if (err < 0)
2896                 return -ENOSYS;
2897
2898         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
2899                 /* Configure flow control refresh threshold,
2900                  * the value for stat_tx_pause_refresh_timer[8]
2901                  * is used for global pause operation.
2902                  */
2903
2904                 I40E_WRITE_REG(hw,
2905                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
2906                                pf->fc_conf.pause_time);
2907
2908                 /* configure the timer value included in transmitted pause
2909                  * frame,
2910                  * the value for stat_tx_pause_quanta[8] is used for global
2911                  * pause operation
2912                  */
2913                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
2914                                pf->fc_conf.pause_time);
2915
2916                 fctrl_reg = I40E_READ_REG(hw,
2917                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
2918
2919                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2920                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
2921                 else
2922                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
2923
2924                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
2925                                fctrl_reg);
2926         } else {
2927                 /* Configure pause time (2 TCs per register) */
2928                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
2929                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
2930                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
2931
2932                 /* Configure flow control refresh threshold value */
2933                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
2934                                pf->fc_conf.pause_time / 2);
2935
2936                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2937
2938                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
2939                  * depending on the configuration
2940                  */
2941                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
2942                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
2943                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
2944                 } else {
2945                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
2946                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
2947                 }
2948
2949                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
2950         }
2951
2952         /* Configure the water marks based on both packets and bytes */
2953         I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
2954                        (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2955                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2956         I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
2957                        (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2958                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2959         I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
2960                        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2961                        << I40E_KILOSHIFT);
2962         I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
2963                        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2964                        << I40E_KILOSHIFT);
2965
2966         I40E_WRITE_FLUSH(hw);
2967
2968         return 0;
2969 }
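
/*
 * Illustrative sketch, not part of this driver: the multiply by 0x00010001
 * above duplicates the 16-bit pause time into both halves of a 32-bit
 * FCTTVN register, because each register carries the timer for two
 * traffic classes:
 */
static uint32_t
demo_pack_pause_time(uint16_t pause_time)
{
        /* e.g. 0xFFFF -> 0xFFFFFFFF: TC n in bits 0-15, TC n+1 in 16-31 */
        return (uint32_t)pause_time * (uint32_t)0x00010001;
}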
2970
2971 static int
2972 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
2973                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
2974 {
2975         PMD_INIT_FUNC_TRACE();
2976
2977         return -ENOSYS;
2978 }
2979
2980 /* Add a MAC address, and update filters */
2981 static void
2982 i40e_macaddr_add(struct rte_eth_dev *dev,
2983                  struct ether_addr *mac_addr,
2984                  __rte_unused uint32_t index,
2985                  uint32_t pool)
2986 {
2987         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2988         struct i40e_mac_filter_info mac_filter;
2989         struct i40e_vsi *vsi;
2990         int ret;
2991
2992         /* If VMDQ not enabled or configured, return */
2993         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
2994                           !pf->nb_cfg_vmdq_vsi)) {
2995                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
2996                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
2997                         pool);
2998                 return;
2999         }
3000
3001         if (pool > pf->nb_cfg_vmdq_vsi) {
3002                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3003                                 pool, pf->nb_cfg_vmdq_vsi);
3004                 return;
3005         }
3006
3007         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3008         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3009                 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3010         else
3011                 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3012
3013         if (pool == 0)
3014                 vsi = pf->main_vsi;
3015         else
3016                 vsi = pf->vmdq[pool - 1].vsi;
3017
3018         ret = i40e_vsi_add_mac(vsi, &mac_filter);
3019         if (ret != I40E_SUCCESS) {
3020                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3021                 return;
3022         }
3023 }
3024
3025 /* Remove a MAC address, and update filters */
3026 static void
3027 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3028 {
3029         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3030         struct i40e_vsi *vsi;
3031         struct rte_eth_dev_data *data = dev->data;
3032         struct ether_addr *macaddr;
3033         int ret;
3034         uint32_t i;
3035         uint64_t pool_sel;
3036
3037         macaddr = &(data->mac_addrs[index]);
3038
3039         pool_sel = dev->data->mac_pool_sel[index];
3040
3041         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3042                 if (pool_sel & (1ULL << i)) {
3043                         if (i == 0)
3044                                 vsi = pf->main_vsi;
3045                         else {
3046                                 /* No VMDQ pool enabled or configured */
3047                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3048                                         (i > pf->nb_cfg_vmdq_vsi)) {
3049                                         PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
3050                                                         "/configured");
3051                                         return;
3052                                 }
3053                                 vsi = pf->vmdq[i - 1].vsi;
3054                         }
3055                         ret = i40e_vsi_delete_mac(vsi, macaddr);
3056
3057                         if (ret) {
3058                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3059                                 return;
3060                         }
3061                 }
3062         }
3063 }
3064
3065 /* Set perfect match or hash match of MAC and VLAN for a VF */
3066 static int
3067 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3068                  struct rte_eth_mac_filter *filter,
3069                  bool add)
3070 {
3071         struct i40e_hw *hw;
3072         struct i40e_mac_filter_info mac_filter;
3073         struct ether_addr old_mac;
3074         struct ether_addr *new_mac;
3075         struct i40e_pf_vf *vf = NULL;
3076         uint16_t vf_id;
3077         int ret;
3078
3079         if (pf == NULL) {
3080                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3081                 return -EINVAL;
3082         }
3083         hw = I40E_PF_TO_HW(pf);
3084
3085         if (filter == NULL) {
3086                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3087                 return -EINVAL;
3088         }
3089
3090         new_mac = &filter->mac_addr;
3091
3092         if (is_zero_ether_addr(new_mac)) {
3093                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3094                 return -EINVAL;
3095         }
3096
3097         vf_id = filter->dst_id;
3098
3099         if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3100                 PMD_DRV_LOG(ERR, "Invalid argument.");
3101                 return -EINVAL;
3102         }
3103         vf = &pf->vfs[vf_id];
3104
3105         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3106                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3107                 return -EINVAL;
3108         }
3109
3110         if (add) {
3111                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3112                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3113                                 ETHER_ADDR_LEN);
3114                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3115                                  ETHER_ADDR_LEN);
3116
3117                 mac_filter.filter_type = filter->filter_type;
3118                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3119                 if (ret != I40E_SUCCESS) {
3120                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3121                         return -1;
3122                 }
3123                 ether_addr_copy(new_mac, &pf->dev_addr);
3124         } else {
3125                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3126                                 ETHER_ADDR_LEN);
3127                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3128                 if (ret != I40E_SUCCESS) {
3129                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3130                         return -1;
3131                 }
3132
3133                 /* Clear device address as it has been removed */
3134                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3135                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3136         }
3137
3138         return 0;
3139 }
3140
3141 /* MAC filter handle */
3142 static int
3143 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3144                 void *arg)
3145 {
3146         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3147         struct rte_eth_mac_filter *filter;
3148         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3149         int ret = I40E_NOT_SUPPORTED;
3150
3151         filter = (struct rte_eth_mac_filter *)(arg);
3152
3153         switch (filter_op) {
3154         case RTE_ETH_FILTER_NOP:
3155                 ret = I40E_SUCCESS;
3156                 break;
3157         case RTE_ETH_FILTER_ADD:
3158                 i40e_pf_disable_irq0(hw);
3159                 if (filter->is_vf)
3160                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
3161                 i40e_pf_enable_irq0(hw);
3162                 break;
3163         case RTE_ETH_FILTER_DELETE:
3164                 i40e_pf_disable_irq0(hw);
3165                 if (filter->is_vf)
3166                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
3167                 i40e_pf_enable_irq0(hw);
3168                 break;
3169         default:
3170                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3171                 ret = I40E_ERR_PARAM;
3172                 break;
3173         }
3174
3175         return ret;
3176 }
3177
3178 static int
3179 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3180 {
3181         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3182         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3183         int ret;
3184
3185         if (!lut)
3186                 return -EINVAL;
3187
3188         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3189                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3190                                           lut, lut_size);
3191                 if (ret) {
3192                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3193                         return ret;
3194                 }
3195         } else {
3196                 uint32_t *lut_dw = (uint32_t *)lut;
3197                 uint16_t i, lut_size_dw = lut_size / 4;
3198
3199                 for (i = 0; i < lut_size_dw; i++)
3200                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
3201         }
3202
3203         return 0;
3204 }
3205
3206 static int
3207 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3208 {
3209         struct i40e_pf *pf;
3210         struct i40e_hw *hw;
3211         int ret;
3212
3213         if (!vsi || !lut)
3214                 return -EINVAL;
3215
3216         pf = I40E_VSI_TO_PF(vsi);
3217         hw = I40E_VSI_TO_HW(vsi);
3218
3219         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3220                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3221                                           lut, lut_size);
3222                 if (ret) {
3223                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3224                         return ret;
3225                 }
3226         } else {
3227                 uint32_t *lut_dw = (uint32_t *)lut;
3228                 uint16_t i, lut_size_dw = lut_size / 4;
3229
3230                 for (i = 0; i < lut_size_dw; i++)
3231                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
3232                 I40E_WRITE_FLUSH(hw);
3233         }
3234
3235         return 0;
3236 }
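
/*
 * Illustrative sketch, not part of this driver: on the register path the
 * byte-wide LUT is moved through 32-bit HLUT registers, four entries per
 * register. The uint32_t cast above relies on the bytes already lying in
 * little-endian order; packing one register word explicitly would be:
 */
static uint32_t
demo_pack_lut_word(const uint8_t *lut, uint16_t word_idx)
{
        uint16_t base = word_idx * 4;

        return (uint32_t)lut[base] |
               ((uint32_t)lut[base + 1] << 8) |
               ((uint32_t)lut[base + 2] << 16) |
               ((uint32_t)lut[base + 3] << 24);
}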
3237
3238 static int
3239 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3240                          struct rte_eth_rss_reta_entry64 *reta_conf,
3241                          uint16_t reta_size)
3242 {
3243         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3244         uint16_t i, lut_size = pf->hash_lut_size;
3245         uint16_t idx, shift;
3246         uint8_t *lut;
3247         int ret;
3248
3249         if (reta_size != lut_size ||
3250                 reta_size > ETH_RSS_RETA_SIZE_512) {
3251                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3252                         "(%d) doesn't match what the hardware can support "
3253                         "(%d)", reta_size, lut_size);
3254                 return -EINVAL;
3255         }
3256
3257         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3258         if (!lut) {
3259                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3260                 return -ENOMEM;
3261         }
3262         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3263         if (ret)
3264                 goto out;
3265         for (i = 0; i < reta_size; i++) {
3266                 idx = i / RTE_RETA_GROUP_SIZE;
3267                 shift = i % RTE_RETA_GROUP_SIZE;
3268                 if (reta_conf[idx].mask & (1ULL << shift))
3269                         lut[i] = reta_conf[idx].reta[shift];
3270         }
3271         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3272
3273 out:
3274         rte_free(lut);
3275
3276         return ret;
3277 }
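
/*
 * Illustrative sketch, not part of this driver: rte_eth_rss_reta_entry64
 * holds 64 entries per element, so RETA entry i lives in group i / 64 at
 * bit i % 64 of that group's mask (RTE_RETA_GROUP_SIZE is 64). The
 * decomposition used by the loop above, in isolation:
 */
static int
demo_reta_entry_selected(const struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t i)
{
        uint16_t idx = i / RTE_RETA_GROUP_SIZE;   /* which 64-entry group */
        uint16_t shift = i % RTE_RETA_GROUP_SIZE; /* which bit in its mask */

        return (reta_conf[idx].mask & (1ULL << shift)) != 0;
}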
3278
3279 static int
3280 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3281                         struct rte_eth_rss_reta_entry64 *reta_conf,
3282                         uint16_t reta_size)
3283 {
3284         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3285         uint16_t i, lut_size = pf->hash_lut_size;
3286         uint16_t idx, shift;
3287         uint8_t *lut;
3288         int ret;
3289
3290         if (reta_size != lut_size ||
3291                 reta_size > ETH_RSS_RETA_SIZE_512) {
3292                 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3293                         "(%d) doesn't match what the hardware can support "
3294                         "(%d)", reta_size, lut_size);
3295                 return -EINVAL;
3296         }
3297
3298         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3299         if (!lut) {
3300                 PMD_DRV_LOG(ERR, "No memory can be allocated");
3301                 return -ENOMEM;
3302         }
3303
3304         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3305         if (ret)
3306                 goto out;
3307         for (i = 0; i < reta_size; i++) {
3308                 idx = i / RTE_RETA_GROUP_SIZE;
3309                 shift = i % RTE_RETA_GROUP_SIZE;
3310                 if (reta_conf[idx].mask & (1ULL << shift))
3311                         reta_conf[idx].reta[shift] = lut[i];
3312         }
3313
3314 out:
3315         rte_free(lut);
3316
3317         return ret;
3318 }
3319
3320 /**
3321  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3322  * @hw:   pointer to the HW structure
3323  * @mem:  pointer to mem struct to fill out
3324  * @size: size of memory requested
3325  * @alignment: what to align the allocation to
3326  **/
3327 enum i40e_status_code
3328 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3329                         struct i40e_dma_mem *mem,
3330                         u64 size,
3331                         u32 alignment)
3332 {
3333         const struct rte_memzone *mz = NULL;
3334         char z_name[RTE_MEMZONE_NAMESIZE];
3335
3336         if (!mem)
3337                 return I40E_ERR_PARAM;
3338
3339         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3340         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3341                                          alignment, RTE_PGSIZE_2M);
3342         if (!mz)
3343                 return I40E_ERR_NO_MEMORY;
3344
3345         mem->size = size;
3346         mem->va = mz->addr;
3347         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
3348         mem->zone = (const void *)mz;
3349         PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
3350                     "%"PRIu64, mz->name, mem->pa);
3351
3352         return I40E_SUCCESS;
3353 }
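
/*
 * Illustrative sketch, not part of this driver: the shared base code calls
 * these hooks in pairs. A 4 KB, 4 KB-aligned DMA region would be obtained
 * and released like this (hw is unused by both hooks, per the signatures
 * above):
 */
static int
demo_dma_roundtrip(struct i40e_hw *hw)
{
        struct i40e_dma_mem mem;

        if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096) != I40E_SUCCESS)
                return -1;

        /* mem.va is the CPU mapping; mem.pa is what the NIC should DMA to */
        i40e_free_dma_mem_d(hw, &mem);
        return 0;
}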
3354
3355 /**
3356  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3357  * @hw:   pointer to the HW structure
3358  * @mem:  ptr to mem struct to free
3359  **/
3360 enum i40e_status_code
3361 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3362                     struct i40e_dma_mem *mem)
3363 {
3364         if (!mem)
3365                 return I40E_ERR_PARAM;
3366
3367         PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
3368                     "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
3369                     mem->pa);
3370         rte_memzone_free((const struct rte_memzone *)mem->zone);
3371         mem->zone = NULL;
3372         mem->va = NULL;
3373         mem->pa = (u64)0;
3374
3375         return I40E_SUCCESS;
3376 }
3377
3378 /**
3379  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3380  * @hw:   pointer to the HW structure
3381  * @mem:  pointer to mem struct to fill out
3382  * @size: size of memory requested
3383  **/
3384 enum i40e_status_code
3385 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3386                          struct i40e_virt_mem *mem,
3387                          u32 size)
3388 {
3389         if (!mem)
3390                 return I40E_ERR_PARAM;
3391
3392         mem->size = size;
3393         mem->va = rte_zmalloc("i40e", size, 0);
3394
3395         if (mem->va)
3396                 return I40E_SUCCESS;
3397         else
3398                 return I40E_ERR_NO_MEMORY;
3399 }
3400
3401 /**
3402  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3403  * @hw:   pointer to the HW structure
3404  * @mem:  pointer to mem struct to free
3405  **/
3406 enum i40e_status_code
3407 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3408                      struct i40e_virt_mem *mem)
3409 {
3410         if (!mem)
3411                 return I40E_ERR_PARAM;
3412
3413         rte_free(mem->va);
3414         mem->va = NULL;
3415
3416         return I40E_SUCCESS;
3417 }
3418
3419 void
3420 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3421 {
3422         rte_spinlock_init(&sp->spinlock);
3423 }
3424
3425 void
3426 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3427 {
3428         rte_spinlock_lock(&sp->spinlock);
3429 }
3430
3431 void
3432 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3433 {
3434         rte_spinlock_unlock(&sp->spinlock);
3435 }
3436
3437 void
3438 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3439 {
3440         return;
3441 }
3442
3443 /**
3444  * Get the hardware capabilities, which will be parsed
3445  * and saved into struct i40e_hw.
3446  */
3447 static int
3448 i40e_get_cap(struct i40e_hw *hw)
3449 {
3450         struct i40e_aqc_list_capabilities_element_resp *buf;
3451         uint16_t len, size = 0;
3452         int ret;
3453
3454         /* Calculate a buffer large enough to hold the response data temporarily */
3455         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3456                                                 I40E_MAX_CAP_ELE_NUM;
3457         buf = rte_zmalloc("i40e", len, 0);
3458         if (!buf) {
3459                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3460                 return I40E_ERR_NO_MEMORY;
3461         }
3462
3463         /* Get and parse the capabilities, then save them to hw */
3464         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3465                         i40e_aqc_opc_list_func_capabilities, NULL);
3466         if (ret != I40E_SUCCESS)
3467                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3468
3469         /* Free the temporary buffer after being used */
3470         rte_free(buf);
3471
3472         return ret;
3473 }
3474
3475 static int
3476 i40e_pf_parameter_init(struct rte_eth_dev *dev)
3477 {
3478         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3479         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3480         uint16_t qp_count = 0, vsi_count = 0;
3481
3482         if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
3483                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
3484                 return -EINVAL;
3485         }
3486         /* Add the parameter init for LFC */
3487         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
3488         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
3489         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
3490
3491         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
3492         pf->max_num_vsi = hw->func_caps.num_vsis;
3493         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
3494         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
3495         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3496
3497         /* FDir queue/VSI allocation */
3498         pf->fdir_qp_offset = 0;
3499         if (hw->func_caps.fd) {
3500                 pf->flags |= I40E_FLAG_FDIR;
3501                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
3502         } else {
3503                 pf->fdir_nb_qps = 0;
3504         }
3505         qp_count += pf->fdir_nb_qps;
3506         vsi_count += 1;
3507
3508         /* LAN queue/VSI allocation */
3509         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
3510         if (!hw->func_caps.rss) {
3511                 pf->lan_nb_qps = 1;
3512         } else {
3513                 pf->flags |= I40E_FLAG_RSS;
3514                 if (hw->mac.type == I40E_MAC_X722)
3515                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
3516                 pf->lan_nb_qps = pf->lan_nb_qp_max;
3517         }
3518         qp_count += pf->lan_nb_qps;
3519         vsi_count += 1;
3520
3521         /* VF queue/VSI allocation */
3522         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
3523         if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
3524                 pf->flags |= I40E_FLAG_SRIOV;
3525                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3526                 pf->vf_num = dev->pci_dev->max_vfs;
3527                 PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
3528                             "in total %u queues", pf->vf_num, pf->vf_nb_qps,
3529                             pf->vf_nb_qps * pf->vf_num);
3530         } else {
3531                 pf->vf_nb_qps = 0;
3532                 pf->vf_num = 0;
3533         }
3534         qp_count += pf->vf_nb_qps * pf->vf_num;
3535         vsi_count += pf->vf_num;
3536
3537         /* VMDq queue/VSI allocation */
3538         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
3539         pf->vmdq_nb_qps = 0;
3540         pf->max_nb_vmdq_vsi = 0;
3541         if (hw->func_caps.vmdq) {
3542                 if (qp_count < hw->func_caps.num_tx_qp &&
3543                         vsi_count < hw->func_caps.num_vsis) {
3544                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
3545                                 qp_count) / pf->vmdq_nb_qp_max;
3546
3547                         /* Limit the maximum number of VMDq VSIs to the
3548                          * maximum that ethdev can support
3549                          */
3550                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3551                                 hw->func_caps.num_vsis - vsi_count);
3552                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3553                                 ETH_64_POOLS);
3554                         if (pf->max_nb_vmdq_vsi) {
3555                                 pf->flags |= I40E_FLAG_VMDQ;
3556                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
3557                                 PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
3558                                             "per VMDQ VSI, in total %u queues",
3559                                             pf->max_nb_vmdq_vsi,
3560                                             pf->vmdq_nb_qps, pf->vmdq_nb_qps *
3561                                             pf->max_nb_vmdq_vsi);
3562                         } else {
3563                                 PMD_DRV_LOG(INFO, "Not enough queues left for "
3564                                             "VMDq");
3565                         }
3566                 } else {
3567                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
3568                 }
3569         }
3570         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
3571         vsi_count += pf->max_nb_vmdq_vsi;
3572
3573         if (hw->func_caps.dcb)
3574                 pf->flags |= I40E_FLAG_DCB;
3575
3576         if (qp_count > hw->func_caps.num_tx_qp) {
3577                 PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
3578                             "the hardware maximum %u", qp_count,
3579                             hw->func_caps.num_tx_qp);
3580                 return -EINVAL;
3581         }
3582         if (vsi_count > hw->func_caps.num_vsis) {
3583                 PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
3584                             "the hardware maximum %u", vsi_count,
3585                             hw->func_caps.num_vsis);
3586                 return -EINVAL;
3587         }
3588
3589         return 0;
3590 }
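
/*
 * Illustrative sketch, not part of this driver: the function above carves
 * one linear queue space into consecutive regions -- FDIR first, then LAN,
 * then per-VF blocks, then VMDq pools. How the offsets chain together,
 * with example counts only:
 */
static void
demo_queue_layout(void)
{
        uint16_t fdir_nb_qps = 1, lan_nb_qps = 64;
        uint16_t vf_num = 4, vf_nb_qps = 4;
        uint16_t fdir_qp_offset = 0;
        uint16_t lan_qp_offset = fdir_qp_offset + fdir_nb_qps;  /* 1 */
        uint16_t vf_qp_offset = lan_qp_offset + lan_nb_qps;     /* 65 */
        uint16_t vmdq_qp_offset = vf_qp_offset +
                                  vf_nb_qps * vf_num;           /* 81 */

        (void)vmdq_qp_offset;
}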
3591
3592 static int
3593 i40e_pf_get_switch_config(struct i40e_pf *pf)
3594 {
3595         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3596         struct i40e_aqc_get_switch_config_resp *switch_config;
3597         struct i40e_aqc_switch_config_element_resp *element;
3598         uint16_t start_seid = 0, num_reported;
3599         int ret;
3600
3601         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
3602                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
3603         if (!switch_config) {
3604                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3605                 return -ENOMEM;
3606         }
3607
3608         /* Get the switch configurations */
3609         ret = i40e_aq_get_switch_config(hw, switch_config,
3610                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
3611         if (ret != I40E_SUCCESS) {
3612                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
3613                 goto fail;
3614         }
3615         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
3616         if (num_reported != 1) { /* The number should be 1 */
3617                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
3618                 goto fail;
3619         }
3620
3621         /* Parse the switch configuration elements */
3622         element = &(switch_config->element[0]);
3623         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
3624                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
3625                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
3626         } else
3627                 PMD_DRV_LOG(INFO, "Unknown element type");
3628
3629 fail:
3630         rte_free(switch_config);
3631
3632         return ret;
3633 }
3634
3635 static int
3636 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
3637                         uint32_t num)
3638 {
3639         struct pool_entry *entry;
3640
3641         if (pool == NULL || num == 0)
3642                 return -EINVAL;
3643
3644         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
3645         if (entry == NULL) {
3646                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
3647                 return -ENOMEM;
3648         }
3649
3650         /* Initialize the queue heap */
3651         pool->num_free = num;
3652         pool->num_alloc = 0;
3653         pool->base = base;
3654         LIST_INIT(&pool->alloc_list);
3655         LIST_INIT(&pool->free_list);
3656
3657         /* Initialize the element */
3658         entry->base = 0;
3659         entry->len = num;
3660
3661         LIST_INSERT_HEAD(&pool->free_list, entry, next);
3662         return 0;
3663 }
3664
3665 static void
3666 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
3667 {
3668         struct pool_entry *entry, *next_entry;
3669
3670         if (pool == NULL)
3671                 return;
3672
3673         for (entry = LIST_FIRST(&pool->alloc_list);
3674                         entry && (next_entry = LIST_NEXT(entry, next), 1);
3675                         entry = next_entry) {
3676                 LIST_REMOVE(entry, next);
3677                 rte_free(entry);
3678         }
3679
3680         for (entry = LIST_FIRST(&pool->free_list);
3681                         entry && (next_entry = LIST_NEXT(entry, next), 1);
3682                         entry = next_entry) {
3683                 LIST_REMOVE(entry, next);
3684                 rte_free(entry);
3685         }
3686
3687         pool->num_free = 0;
3688         pool->num_alloc = 0;
3689         pool->base = 0;
3690         LIST_INIT(&pool->alloc_list);
3691         LIST_INIT(&pool->free_list);
3692 }
3693
3694 static int
3695 i40e_res_pool_free(struct i40e_res_pool_info *pool,
3696                        uint32_t base)
3697 {
3698         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
3699         uint32_t pool_offset;
3700         int insert;
3701
3702         if (pool == NULL) {
3703                 PMD_DRV_LOG(ERR, "Invalid parameter");
3704                 return -EINVAL;
3705         }
3706
3707         pool_offset = base - pool->base;
3708         /* Lookup in alloc list */
3709         LIST_FOREACH(entry, &pool->alloc_list, next) {
3710                 if (entry->base == pool_offset) {
3711                         valid_entry = entry;
3712                         LIST_REMOVE(entry, next);
3713                         break;
3714                 }
3715         }
3716
3717         /* Not found, return */
3718         if (valid_entry == NULL) {
3719                 PMD_DRV_LOG(ERR, "Failed to find entry");
3720                 return -EINVAL;
3721         }
3722
3723         /**
3724          * Found it, move it to the free list and try to merge.
3725          * To make merging easier, the free list is always sorted by queue base.
3726          * Find the adjacent prev and next entries.
3727          */
3728         prev = next = NULL;
3729         LIST_FOREACH(entry, &pool->free_list, next) {
3730                 if (entry->base > valid_entry->base) {
3731                         next = entry;
3732                         break;
3733                 }
3734                 prev = entry;
3735         }
3736
3737         insert = 0;
3738                 /* Try to merge with the next one */
3739         if (next != NULL) {
3740                 /* Merge with next one */
3741                 if (valid_entry->base + valid_entry->len == next->base) {
3742                         next->base = valid_entry->base;
3743                         next->len += valid_entry->len;
3744                         rte_free(valid_entry);
3745                         valid_entry = next;
3746                         insert = 1;
3747                 }
3748         }
3749
3750         if (prev != NULL) {
3751                 /* Merge with previous one */
3752                 if (prev->base + prev->len == valid_entry->base) {
3753                         prev->len += valid_entry->len;
3754                         /* If it merged with the next one, remove the next node */
3755                         if (insert == 1) {
3756                                 LIST_REMOVE(valid_entry, next);
3757                                 rte_free(valid_entry);
3758                         } else {
3759                                 rte_free(valid_entry);
3760                                 insert = 1;
3761                         }
3762                 }
3763         }
3764
3765         /* No entry found to merge with, insert */
3766         if (insert == 0) {
3767                 if (prev != NULL)
3768                         LIST_INSERT_AFTER(prev, valid_entry, next);
3769                 else if (next != NULL)
3770                         LIST_INSERT_BEFORE(next, valid_entry, next);
3771                 else /* The list is empty, insert at the head */
3772                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
3773         }
3774
3775         pool->num_free += valid_entry->len;
3776         pool->num_alloc -= valid_entry->len;
3777
3778         return 0;
3779 }
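
/*
 * Illustrative sketch, not part of this driver: freeing range [8, 16)
 * between free neighbours [0, 8) and [16, 24) triggers both merge steps
 * above and collapses all three into [0, 24). A standalone model:
 */
struct demo_range {
        uint32_t base;
        uint32_t len;
};

static struct demo_range
demo_merge(struct demo_range prev, struct demo_range freed,
           struct demo_range next)
{
        if (freed.base + freed.len == next.base)        /* merge with next */
                freed.len += next.len;
        if (prev.base + prev.len == freed.base) {       /* merge with prev */
                prev.len += freed.len;
                return prev;    /* {0, 24} for the example above */
        }
        return freed;
}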
3780
3781 static int
3782 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
3783                        uint16_t num)
3784 {
3785         struct pool_entry *entry, *valid_entry;
3786
3787         if (pool == NULL || num == 0) {
3788                 PMD_DRV_LOG(ERR, "Invalid parameter");
3789                 return -EINVAL;
3790         }
3791
3792         if (pool->num_free < num) {
3793                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
3794                             num, pool->num_free);
3795                 return -ENOMEM;
3796         }
3797
3798         valid_entry = NULL;
3799         /* Look up the free list and find the best-fit entry */
3800         LIST_FOREACH(entry, &pool->free_list, next) {
3801                 if (entry->len >= num) {
3802                         /* An exact fit is the best */
3803                         if (entry->len == num) {
3804                                 valid_entry = entry;
3805                                 break;
3806                         }
3807                         if (valid_entry == NULL || valid_entry->len > entry->len)
3808                                 valid_entry = entry;
3809                 }
3810         }
3811
3812         /* No entry found to satisfy the request, return */
3813         if (valid_entry == NULL) {
3814                 PMD_DRV_LOG(ERR, "No valid entry found");
3815                 return -ENOMEM;
3816         }
3817         /**
3818          * The entry has exactly the requested number of queues;
3819          * remove it from the free_list.
3820          */
3821         if (valid_entry->len == num) {
3822                 LIST_REMOVE(valid_entry, next);
3823         } else {
3824                 /**
3825                  * The entry has more queues than requested;
3826                  * create a new entry for the alloc_list and subtract
3827                  * the allocated base and length from the free_list entry.
3828                  */
3829                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
3830                 if (entry == NULL) {
3831                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3832                                     "resource pool");
3833                         return -ENOMEM;
3834                 }
3835                 entry->base = valid_entry->base;
3836                 entry->len = num;
3837                 valid_entry->base += num;
3838                 valid_entry->len -= num;
3839                 valid_entry = entry;
3840         }
3841
3842         /* Insert it into alloc list, not sorted */
3843         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
3844
3845         pool->num_free -= valid_entry->len;
3846         pool->num_alloc += valid_entry->len;
3847
3848         return valid_entry->base + pool->base;
3849 }
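/*
 * Editor's sketch (not part of the driver): typical pairing of the two pool
 * helpers above. i40e_res_pool_alloc() returns an absolute base (pool->base
 * plus the relative base of the carved entry), and the call sites further
 * below pass that same value back to i40e_res_pool_free():
 */
#if 0 /* illustrative only */
	int base = i40e_res_pool_alloc(&pf->qp_pool, 8);

	if (base < 0)
		return base;    /* -EINVAL or -ENOMEM from the pool */
	/* ... use queues [base, base + 8) ... */
	i40e_res_pool_free(&pf->qp_pool, base);
#endif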
3850
3851 /**
3852  * bitmap_is_subset - Check whether every bit set in src2 is also set in src1
3853  **/
3854 static inline int
3855 bitmap_is_subset(uint8_t src1, uint8_t src2)
3856 {
3857         return !((src1 ^ src2) & src2);
3858 }
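/*
 * Editor's note (illustrative): !((src1 ^ src2) & src2) is non-zero exactly
 * when every bit set in src2 is also set in src1: (src1 ^ src2) flags the
 * differing bits, and masking with src2 keeps only bits src2 has but src1
 * lacks. E.g. src1 = 0x0F, src2 = 0x05 -> (0x0A & 0x05) = 0, a subset;
 * src1 = 0x0F, src2 = 0x15 -> (0x1A & 0x15) = 0x10, not a subset.
 */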
3859
3860 static enum i40e_status_code
3861 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3862 {
3863         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3864
3865         /* If DCB is not supported, only default TC is supported */
3866         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
3867                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
3868                 return I40E_NOT_SUPPORTED;
3869         }
3870
3871         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
3872                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
3873                             "HW support 0x%x", enabled_tcmap,
3874                             hw->func_caps.enabled_tcmap);
3875                 return I40E_NOT_SUPPORTED;
3876         }
3877         return I40E_SUCCESS;
3878 }
3879
3880 int
3881 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
3882                                 struct i40e_vsi_vlan_pvid_info *info)
3883 {
3884         struct i40e_hw *hw;
3885         struct i40e_vsi_context ctxt;
3886         uint8_t vlan_flags = 0;
3887         int ret;
3888
3889         if (vsi == NULL || info == NULL) {
3890                 PMD_DRV_LOG(ERR, "invalid parameters");
3891                 return I40E_ERR_PARAM;
3892         }
3893
3894         if (info->on) {
3895                 vsi->info.pvid = info->config.pvid;
3896                 /**
3897                  * If PVID insertion is enabled, only tagged packets are
3898                  * allowed to be sent out.
3899                  */
3900                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
3901                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3902         } else {
3903                 vsi->info.pvid = 0;
3904                 if (info->config.reject.tagged == 0)
3905                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3906
3907                 if (info->config.reject.untagged == 0)
3908                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
3909         }
3910         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
3911                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
3912         vsi->info.port_vlan_flags |= vlan_flags;
3913         vsi->info.valid_sections =
3914                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3915         memset(&ctxt, 0, sizeof(ctxt));
3916         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3917         ctxt.seid = vsi->seid;
3918
3919         hw = I40E_VSI_TO_HW(vsi);
3920         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3921         if (ret != I40E_SUCCESS)
3922                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
3923
3924         return ret;
3925 }
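/*
 * Editor's sketch (not part of the driver): typical use of the PVID helper
 * above, enabling PVID insertion with an example VLAN ID:
 */
#if 0 /* illustrative only */
	struct i40e_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = 1;
	info.config.pvid = 100;         /* example VLAN ID */
	ret = i40e_vsi_vlan_pvid_set(vsi, &info);
#endif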
3926
3927 static int
3928 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3929 {
3930         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3931         int i, ret;
3932         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
3933
3934         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3935         if (ret != I40E_SUCCESS)
3936                 return ret;
3937
3938         if (!vsi->seid) {
3939                 PMD_DRV_LOG(ERR, "seid not valid");
3940                 return -EINVAL;
3941         }
3942
3943         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
3944         tc_bw_data.tc_valid_bits = enabled_tcmap;
3945         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3946                 tc_bw_data.tc_bw_credits[i] =
3947                         (enabled_tcmap & (1 << i)) ? 1 : 0;
3948
3949         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
3950         if (ret != I40E_SUCCESS) {
3951                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
3952                 return ret;
3953         }
3954
3955         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
3956                                         sizeof(vsi->info.qs_handle));
3957         return I40E_SUCCESS;
3958 }
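/*
 * Editor's note (illustrative): each enabled TC is given 1 relative BW
 * credit above, i.e. equal weighting across enabled TCs. For example,
 * enabled_tcmap = 0x3 yields tc_bw_credits = {1, 1, 0, 0, 0, 0, 0, 0}.
 */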
3959
3960 static enum i40e_status_code
3961 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
3962                                  struct i40e_aqc_vsi_properties_data *info,
3963                                  uint8_t enabled_tcmap)
3964 {
3965         enum i40e_status_code ret;
3966         int i, total_tc = 0;
3967         uint16_t qpnum_per_tc, bsf, qp_idx;
3968
3969         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3970         if (ret != I40E_SUCCESS)
3971                 return ret;
3972
3973         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3974                 if (enabled_tcmap & (1 << i))
3975                         total_tc++;
3976         vsi->enabled_tc = enabled_tcmap;
3977
3978         /* Number of queues per enabled TC */
3979         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
3980         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
3981         bsf = rte_bsf32(qpnum_per_tc);
3982
3983         /* Adjust the queue number to actual queues that can be applied */
3984         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
3985                 vsi->nb_qps = qpnum_per_tc * total_tc;
3986
3987         /**
3988          * Configure TC and queue mapping parameters: each enabled TC
3989          * gets qpnum_per_tc queues allocated to it; each disabled TC
3990          * is served by the default queue.
3991          */
3992         qp_idx = 0;
3993         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3994                 if (vsi->enabled_tc & (1 << i)) {
3995                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
3996                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
3997                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
3998                         qp_idx += qpnum_per_tc;
3999                 } else
4000                         info->tc_mapping[i] = 0;
4001         }
4002
4003         /* Associate queue number with VSI */
4004         if (vsi->type == I40E_VSI_SRIOV) {
4005                 info->mapping_flags |=
4006                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4007                 for (i = 0; i < vsi->nb_qps; i++)
4008                         info->queue_mapping[i] =
4009                                 rte_cpu_to_le_16(vsi->base_queue + i);
4010         } else {
4011                 info->mapping_flags |=
4012                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4013                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4014         }
4015         info->valid_sections |=
4016                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4017
4018         return I40E_SUCCESS;
4019 }
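/*
 * Editor's sketch (not part of the driver): worked example of the TC/queue
 * mapping encoding above. With nb_qps = 16 and enabled_tcmap = 0x3 (TC0 and
 * TC1): total_tc = 2, qpnum_per_tc = 8 and bsf = rte_bsf32(8) = 3, so each
 * enabled TC's tc_mapping word packs its first queue index and the log2 of
 * its queue count:
 */
#if 0 /* illustrative only */
	/* TC0: starts at queue 0, 2^3 queues */
	info->tc_mapping[0] = rte_cpu_to_le_16(
		(0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
	/* TC1: starts at queue 8, 2^3 queues */
	info->tc_mapping[1] = rte_cpu_to_le_16(
		(8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
#endif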
4020
4021 static int
4022 i40e_veb_release(struct i40e_veb *veb)
4023 {
4024         struct i40e_vsi *vsi;
4025         struct i40e_hw *hw;
4026
4027         if (veb == NULL)
4028                 return -EINVAL;
4029
4030         if (!TAILQ_EMPTY(&veb->head)) {
4031                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4032                 return -EACCES;
4033         }
4034         /* associate_vsi field is NULL for floating VEB */
4035         if (veb->associate_vsi != NULL) {
4036                 vsi = veb->associate_vsi;
4037                 hw = I40E_VSI_TO_HW(vsi);
4038
4039                 vsi->uplink_seid = veb->uplink_seid;
4040                 vsi->veb = NULL;
4041         } else {
4042                 veb->associate_pf->main_vsi->floating_veb = NULL;
4043                 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4044         }
4045
4046         i40e_aq_delete_element(hw, veb->seid, NULL);
4047         rte_free(veb);
4048         return I40E_SUCCESS;
4049 }
4050
4051 /* Setup a veb */
4052 static struct i40e_veb *
4053 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4054 {
4055         struct i40e_veb *veb;
4056         int ret;
4057         struct i40e_hw *hw;
4058
4059         if (pf == NULL) {
4060                 PMD_DRV_LOG(ERR,
4061                             "VEB setup failed, associated PF shouldn't be NULL");
4062                 return NULL;
4063         }
4064         hw = I40E_PF_TO_HW(pf);
4065
4066         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4067         if (!veb) {
4068                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4069                 goto fail;
4070         }
4071
4072         veb->associate_vsi = vsi;
4073         veb->associate_pf = pf;
4074         TAILQ_INIT(&veb->head);
4075         veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4076
4077         /* create floating veb if vsi is NULL */
4078         if (vsi != NULL) {
4079                 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4080                                       I40E_DEFAULT_TCMAP, false,
4081                                       &veb->seid, false, NULL);
4082         } else {
4083                 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4084                                       true, &veb->seid, false, NULL);
4085         }
4086
4087         if (ret != I40E_SUCCESS) {
4088                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4089                             hw->aq.asq_last_status);
4090                 goto fail;
4091         }
4092
4093         /* get statistics index */
4094         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4095                                 &veb->stats_idx, NULL, NULL, NULL);
4096         if (ret != I40E_SUCCESS) {
4097                 PMD_DRV_LOG(ERR, "Get VEB statistics index failed, aq_err: %d",
4098                             hw->aq.asq_last_status);
4099                 goto fail;
4100         }
4101         /* Get VEB bandwidth, to be implemented */
4102         /* The associated VSI now binds to this VEB; set its uplink to it */
4103         if (vsi)
4104                 vsi->uplink_seid = veb->seid;
4105
4106         return veb;
4107 fail:
4108         rte_free(veb);
4109         return NULL;
4110 }
4111
4112 int
4113 i40e_vsi_release(struct i40e_vsi *vsi)
4114 {
4115         struct i40e_pf *pf;
4116         struct i40e_hw *hw;
4117         struct i40e_vsi_list *vsi_list;
4118         void *temp;
4119         int ret;
4120         struct i40e_mac_filter *f;
4121         uint16_t user_param;
4122
4123         if (!vsi)
4124                 return I40E_SUCCESS;
4125
4126         user_param = vsi->user_param;
4127
4128         pf = I40E_VSI_TO_PF(vsi);
4129         hw = I40E_VSI_TO_HW(vsi);
4130
4131         /* VSI has children attached; release the children first */
4132         if (vsi->veb) {
4133                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4134                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4135                                 return -1;
4136                 }
4137                 i40e_veb_release(vsi->veb);
4138         }
4139
4140         if (vsi->floating_veb) {
4141                 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4142                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4143                                 return -1;
4144                 }
4145         }
4146
4147         /* Remove all macvlan filters of the VSI */
4148         i40e_vsi_remove_all_macvlan_filter(vsi);
4149         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4150                 rte_free(f);
4151
4152         if (vsi->type != I40E_VSI_MAIN &&
4153             ((vsi->type != I40E_VSI_SRIOV) ||
4154             !pf->floating_veb_list[user_param])) {
4155                 /* Remove vsi from parent's sibling list */
4156                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4157                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
4158                         return I40E_ERR_PARAM;
4159                 }
4160                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4161                                 &vsi->sib_vsi_list, list);
4162
4163                 /* Remove all switch elements of the VSI */
4164                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4165                 if (ret != I40E_SUCCESS)
4166                         PMD_DRV_LOG(ERR, "Failed to delete element");
4167         }
4168
4169         if ((vsi->type == I40E_VSI_SRIOV) &&
4170             pf->floating_veb_list[user_param]) {
4171                 /* Remove vsi from parent's sibling list */
4172                 if (vsi->parent_vsi == NULL ||
4173                     vsi->parent_vsi->floating_veb == NULL) {
4174                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its floating VEB is NULL");
4175                         return I40E_ERR_PARAM;
4176                 }
4177                 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4178                              &vsi->sib_vsi_list, list);
4179
4180                 /* Remove all switch elements of the VSI */
4181                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4182                 if (ret != I40E_SUCCESS)
4183                         PMD_DRV_LOG(ERR, "Failed to delete element");
4184         }
4185
4186         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4187
4188         if (vsi->type != I40E_VSI_SRIOV)
4189                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4190         rte_free(vsi);
4191
4192         return I40E_SUCCESS;
4193 }
4194
4195 static int
4196 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4197 {
4198         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4199         struct i40e_aqc_remove_macvlan_element_data def_filter;
4200         struct i40e_mac_filter_info filter;
4201         int ret;
4202
4203         if (vsi->type != I40E_VSI_MAIN)
4204                 return I40E_ERR_CONFIG;
4205         memset(&def_filter, 0, sizeof(def_filter));
4206         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4207                                         ETH_ADDR_LEN);
4208         def_filter.vlan_tag = 0;
4209         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4210                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4211         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4212         if (ret != I40E_SUCCESS) {
4213                 struct i40e_mac_filter *f;
4214                 struct ether_addr *mac;
4215
4216                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
4217                             "macvlan filter");
4218                 /* Add the permanent MAC into the MAC list instead */
4219                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4220                 if (f == NULL) {
4221                         PMD_DRV_LOG(ERR, "failed to allocate memory");
4222                         return I40E_ERR_NO_MEMORY;
4223                 }
4224                 mac = &f->mac_info.mac_addr;
4225                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4226                                 ETH_ADDR_LEN);
4227                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4228                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4229                 vsi->mac_num++;
4230
4231                 return ret;
4232         }
4233         (void)rte_memcpy(&filter.mac_addr,
4234                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4235         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4236         return i40e_vsi_add_mac(vsi, &filter);
4237 }
4238
4239 /*
4240  * i40e_vsi_get_bw_config - Query VSI BW Information
4241  * @vsi: the VSI to be queried
4242  *
4243  * Returns 0 on success, negative value on failure
4244  */
4245 static enum i40e_status_code
4246 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4247 {
4248         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4249         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4250         struct i40e_hw *hw = &vsi->adapter->hw;
4251         i40e_status ret;
4252         int i;
4253         uint32_t bw_max;
4254
4255         memset(&bw_config, 0, sizeof(bw_config));
4256         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4257         if (ret != I40E_SUCCESS) {
4258                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4259                             hw->aq.asq_last_status);
4260                 return ret;
4261         }
4262
4263         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4264         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4265                                         &ets_sla_config, NULL);
4266         if (ret != I40E_SUCCESS) {
4267                 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
4268                             "configuration %u", hw->aq.asq_last_status);
4269                 return ret;
4270         }
4271
4272         /* store and print out BW info */
4273         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4274         vsi->bw_info.bw_max = bw_config.max_bw;
4275         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4276         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
4277         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4278                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4279                      I40E_16_BIT_WIDTH);
4280         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4281                 vsi->bw_info.bw_ets_share_credits[i] =
4282                                 ets_sla_config.share_credits[i];
4283                 vsi->bw_info.bw_ets_credits[i] =
4284                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
4285                 /* 4 bits per TC, 4th bit is reserved */
4286                 vsi->bw_info.bw_ets_max[i] =
4287                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4288                                   RTE_LEN2MASK(3, uint8_t));
4289                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4290                             vsi->bw_info.bw_ets_share_credits[i]);
4291                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4292                             vsi->bw_info.bw_ets_credits[i]);
4293                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4294                             vsi->bw_info.bw_ets_max[i]);
4295         }
4296
4297         return I40E_SUCCESS;
4298 }
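/*
 * Editor's sketch (not part of the driver): the tc_bw_max unpacking above
 * glues two 16-bit words into one 32-bit value, then each TC reads a 4-bit
 * nibble of which only the low 3 bits are meaningful (RTE_LEN2MASK(3,
 * uint8_t) == 0x7). For example, with bw_max = 0x00432100, TC2's field is:
 */
#if 0 /* illustrative only */
	uint32_t bw_max = 0x00432100;
	/* nibble 2 (bits 8..11) is 0x1; masking with 0x7 keeps it */
	uint8_t tc2_max = (uint8_t)((bw_max >> (2 * I40E_4_BIT_WIDTH)) & 0x7);
#endif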
4299
4300 /* i40e_enable_pf_lb
4301  * @pf: pointer to the pf structure
4302  *
4303  * allow loopback on pf
4304  */
4305 static inline void
4306 i40e_enable_pf_lb(struct i40e_pf *pf)
4307 {
4308         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4309         struct i40e_vsi_context ctxt;
4310         int ret;
4311
4312         /* Use the FW API if FW >= v5.0 */
4313         if (hw->aq.fw_maj_ver < 5) {
4314                 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4315                 return;
4316         }
4317
4318         memset(&ctxt, 0, sizeof(ctxt));
4319         ctxt.seid = pf->main_vsi_seid;
4320         ctxt.pf_num = hw->pf_id;
4321         ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4322         if (ret) {
4323                 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4324                             ret, hw->aq.asq_last_status);
4325                 return;
4326         }
4327         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4328         ctxt.info.valid_sections =
4329                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4330         ctxt.info.switch_id |=
4331                 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4332
4333         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4334         if (ret)
4335                 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
4336                             hw->aq.asq_last_status);
4337 }
4338
4339 /* Setup a VSI */
4340 struct i40e_vsi *
4341 i40e_vsi_setup(struct i40e_pf *pf,
4342                enum i40e_vsi_type type,
4343                struct i40e_vsi *uplink_vsi,
4344                uint16_t user_param)
4345 {
4346         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4347         struct i40e_vsi *vsi;
4348         struct i40e_mac_filter_info filter;
4349         int ret;
4350         struct i40e_vsi_context ctxt;
4351         struct ether_addr broadcast =
4352                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4353
4354         if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4355             uplink_vsi == NULL) {
4356                 PMD_DRV_LOG(ERR, "VSI setup failed, "
4357                             "uplink VSI shouldn't be NULL");
4358                 return NULL;
4359         }
4360
4361         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4362                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
4363                             "uplink VSI should be NULL");
4364                 return NULL;
4365         }
4366
4367         /* Two situations:
4368          * 1. type is not MAIN and uplink VSI is not NULL:
4369          *    if the uplink VSI hasn't set up a VEB yet, create one under its veb field.
4370          * 2. type is SRIOV and uplink VSI is NULL:
4371          *    if the floating VEB is NULL, create one under the floating_veb field.
4372          */
4373
4374         if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
4375             uplink_vsi->veb == NULL) {
4376                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
4377
4378                 if (uplink_vsi->veb == NULL) {
4379                         PMD_DRV_LOG(ERR, "VEB setup failed");
4380                         return NULL;
4381                 }
4382                 /* Set ALLOWLOOPBACK on the PF when the VEB is created */
4383                 i40e_enable_pf_lb(pf);
4384         }
4385
4386         if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
4387             pf->main_vsi->floating_veb == NULL) {
4388                 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
4389
4390                 if (pf->main_vsi->floating_veb == NULL) {
4391                         PMD_DRV_LOG(ERR, "VEB setup failed");
4392                         return NULL;
4393                 }
4394         }
4395
4396         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
4397         if (!vsi) {
4398                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
4399                 return NULL;
4400         }
4401         TAILQ_INIT(&vsi->mac_list);
4402         vsi->type = type;
4403         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
4404         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
4405         vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
4406         vsi->user_param = user_param;
4407         /* Allocate queues */
4408         switch (vsi->type) {
4409         case I40E_VSI_MAIN:
4410                 vsi->nb_qps = pf->lan_nb_qps;
4411                 break;
4412         case I40E_VSI_SRIOV:
4413                 vsi->nb_qps = pf->vf_nb_qps;
4414                 break;
4415         case I40E_VSI_VMDQ2:
4416                 vsi->nb_qps = pf->vmdq_nb_qps;
4417                 break;
4418         case I40E_VSI_FDIR:
4419                 vsi->nb_qps = pf->fdir_nb_qps;
4420                 break;
4421         default:
4422                 goto fail_mem;
4423         }
4424         /*
4425          * The filter status descriptor is reported in rx queue 0,
4426          * while the tx queue for fdir filter programming has no
4427          * such constraint and can be any queue.
4428          * To simplify, make the FDIR vsi use queue pair 0.
4429          * To guarantee queue pair 0 is used, queue allocation
4430          * needs to be done before this function is called.
4431          */
4432         if (type != I40E_VSI_FDIR) {
4433                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
4434                 if (ret < 0) {
4435                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
4436                                     vsi->seid, ret);
4437                         goto fail_mem;
4438                 }
4439                 vsi->base_queue = ret;
4440         } else
4441                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
4442
4443         /* VFs have MSIX interrupts in the VF range; don't allocate here */
4444         if (type == I40E_VSI_MAIN) {
4445                 ret = i40e_res_pool_alloc(&pf->msix_pool,
4446                                           RTE_MIN(vsi->nb_qps,
4447                                                   RTE_MAX_RXTX_INTR_VEC_ID));
4448                 if (ret < 0) {
4449                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
4450                                     vsi->seid, ret);
4451                         goto fail_queue_alloc;
4452                 }
4453                 vsi->msix_intr = ret;
4454                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
4455         } else if (type != I40E_VSI_SRIOV) {
4456                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
4457                 if (ret < 0) {
4458                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
4459                         goto fail_queue_alloc;
4460                 }
4461                 vsi->msix_intr = ret;
4462                 vsi->nb_msix = 1;
4463         } else {
4464                 vsi->msix_intr = 0;
4465                 vsi->nb_msix = 0;
4466         }
4467
4468         /* Add VSI */
4469         if (type == I40E_VSI_MAIN) {
4470                 /* For the main VSI, no need to add it since it's the default one */
4471                 vsi->uplink_seid = pf->mac_seid;
4472                 vsi->seid = pf->main_vsi_seid;
4473                 /* Bind queues with specific MSIX interrupt */
4474                 /**
4475                  * At least 2 interrupts are needed: one for the misc cause,
4476                  * enabled from the OS side; another for the queues, bound to
4477                  * the interrupt from the device side only.
4478                  */
4479
4480                 /* Get default VSI parameters from hardware */
4481                 memset(&ctxt, 0, sizeof(ctxt));
4482                 ctxt.seid = vsi->seid;
4483                 ctxt.pf_num = hw->pf_id;
4484                 ctxt.uplink_seid = vsi->uplink_seid;
4485                 ctxt.vf_num = 0;
4486                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4487                 if (ret != I40E_SUCCESS) {
4488                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
4489                         goto fail_msix_alloc;
4490                 }
4491                 (void)rte_memcpy(&vsi->info, &ctxt.info,
4492                         sizeof(struct i40e_aqc_vsi_properties_data));
4493                 vsi->vsi_id = ctxt.vsi_number;
4494                 vsi->info.valid_sections = 0;
4495
4496                 /* Configure TC; enable TC0 only */
4497                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
4498                         I40E_SUCCESS) {
4499                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
4500                         goto fail_msix_alloc;
4501                 }
4502
4503                 /* TC, queue mapping */
4504                 memset(&ctxt, 0, sizeof(ctxt));
4505                 vsi->info.valid_sections |=
4506                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4507                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
4508                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4509                 (void)rte_memcpy(&ctxt.info, &vsi->info,
4510                         sizeof(struct i40e_aqc_vsi_properties_data));
4511                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4512                                                 I40E_DEFAULT_TCMAP);
4513                 if (ret != I40E_SUCCESS) {
4514                         PMD_DRV_LOG(ERR, "Failed to configure "
4515                                     "TC queue mapping");
4516                         goto fail_msix_alloc;
4517                 }
4518                 ctxt.seid = vsi->seid;
4519                 ctxt.pf_num = hw->pf_id;
4520                 ctxt.uplink_seid = vsi->uplink_seid;
4521                 ctxt.vf_num = 0;
4522
4523                 /* Update VSI parameters */
4524                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4525                 if (ret != I40E_SUCCESS) {
4526                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
4527                         goto fail_msix_alloc;
4528                 }
4529
4530                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
4531                                                 sizeof(vsi->info.tc_mapping));
4532                 (void)rte_memcpy(&vsi->info.queue_mapping,
4533                                 &ctxt.info.queue_mapping,
4534                         sizeof(vsi->info.queue_mapping));
4535                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
4536                 vsi->info.valid_sections = 0;
4537
4538                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
4539                                 ETH_ADDR_LEN);
4540
4541                 /**
4542                  * Updating the default filter settings is necessary to prevent
4543                  * reception of tagged packets.
4544                  * Some old firmware configurations load a default macvlan
4545                  * filter which accepts both tagged and untagged packets.
4546                  * The update replaces it with a normal filter where needed.
4547                  * For NVM 4.2.2 or later, the update is not needed anymore:
4548                  * firmware with correct configurations loads the expected
4549                  * default macvlan filter, which cannot be removed.
4550                  */
4551                 i40e_update_default_filter_setting(vsi);
4552                 i40e_config_qinq(hw, vsi);
4553         } else if (type == I40E_VSI_SRIOV) {
4554                 memset(&ctxt, 0, sizeof(ctxt));
4555                 /**
4556                  * For other VSIs, the uplink_seid equals the uplink VSI's
4557                  * uplink_seid since they share the same VEB
4558                  */
4559                 if (uplink_vsi == NULL)
4560                         vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
4561                 else
4562                         vsi->uplink_seid = uplink_vsi->uplink_seid;
4563                 ctxt.pf_num = hw->pf_id;
4564                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4565                 ctxt.uplink_seid = vsi->uplink_seid;
4566                 ctxt.connection_type = 0x1;
4567                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4568
4569                 /* Use the VEB configuration if FW >= v5.0 */
4570                 if (hw->aq.fw_maj_ver >= 5) {
4571                         /* Configure switch ID */
4572                         ctxt.info.valid_sections |=
4573                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4574                         ctxt.info.switch_id =
4575                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4576                 }
4577
4578                 /* Configure port/vlan */
4579                 ctxt.info.valid_sections |=
4580                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4581                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4582                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4583                                                 I40E_DEFAULT_TCMAP);
4584                 if (ret != I40E_SUCCESS) {
4585                         PMD_DRV_LOG(ERR, "Failed to configure "
4586                                     "TC queue mapping");
4587                         goto fail_msix_alloc;
4588                 }
4589                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4590                 ctxt.info.valid_sections |=
4591                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4592                 /**
4593                  * Since the VSI is not created yet, only configure the
4594                  * parameters; the VSI will be added below.
4595                  */
4596
4597                 i40e_config_qinq(hw, vsi);
4598         } else if (type == I40E_VSI_VMDQ2) {
4599                 memset(&ctxt, 0, sizeof(ctxt));
4600                 /*
4601                  * For other VSIs, the uplink_seid equals the uplink VSI's
4602                  * uplink_seid since they share the same VEB
4603                  */
4604                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4605                 ctxt.pf_num = hw->pf_id;
4606                 ctxt.vf_num = 0;
4607                 ctxt.uplink_seid = vsi->uplink_seid;
4608                 ctxt.connection_type = 0x1;
4609                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4610
4611                 ctxt.info.valid_sections |=
4612                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4613                 /* user_param carries a flag to enable loopback */
4614                 if (user_param) {
4615                         ctxt.info.switch_id =
4616                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
4617                         ctxt.info.switch_id |=
4618                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4619                 }
4620
4621                 /* Configure port/vlan */
4622                 ctxt.info.valid_sections |=
4623                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4624                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4625                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4626                                                 I40E_DEFAULT_TCMAP);
4627                 if (ret != I40E_SUCCESS) {
4628                         PMD_DRV_LOG(ERR, "Failed to configure "
4629                                         "TC queue mapping");
4630                         goto fail_msix_alloc;
4631                 }
4632                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4633                 ctxt.info.valid_sections |=
4634                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4635         } else if (type == I40E_VSI_FDIR) {
4636                 memset(&ctxt, 0, sizeof(ctxt));
4637                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4638                 ctxt.pf_num = hw->pf_id;
4639                 ctxt.vf_num = 0;
4640                 ctxt.uplink_seid = vsi->uplink_seid;
4641                 ctxt.connection_type = 0x1;     /* regular data port */
4642                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4643                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4644                                                 I40E_DEFAULT_TCMAP);
4645                 if (ret != I40E_SUCCESS) {
4646                         PMD_DRV_LOG(ERR, "Failed to configure "
4647                                         "TC queue mapping.");
4648                         goto fail_msix_alloc;
4649                 }
4650                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4651                 ctxt.info.valid_sections |=
4652                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4653         } else {
4654                 PMD_DRV_LOG(ERR, "VSI: unsupported VSI type");
4655                 goto fail_msix_alloc;
4656         }
4657
4658         if (vsi->type != I40E_VSI_MAIN) {
4659                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
4660                 if (ret != I40E_SUCCESS) {
4661                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
4662                                     hw->aq.asq_last_status);
4663                         goto fail_msix_alloc;
4664                 }
4665                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
4666                 vsi->info.valid_sections = 0;
4667                 vsi->seid = ctxt.seid;
4668                 vsi->vsi_id = ctxt.vsi_number;
4669                 vsi->sib_vsi_list.vsi = vsi;
4670                 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
4671                         TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
4672                                           &vsi->sib_vsi_list, list);
4673                 } else {
4674                         TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
4675                                           &vsi->sib_vsi_list, list);
4676                 }
4677         }
4678
4679         /* MAC/VLAN configuration */
4680         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
4681         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4682
4683         ret = i40e_vsi_add_mac(vsi, &filter);
4684         if (ret != I40E_SUCCESS) {
4685                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4686                 goto fail_msix_alloc;
4687         }
4688
4689         /* Get VSI BW information */
4690         i40e_vsi_get_bw_config(vsi);
4691         return vsi;
4692 fail_msix_alloc:
4693         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4694 fail_queue_alloc:
4695         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4696 fail_mem:
4697         rte_free(vsi);
4698         return NULL;
4699 }
4700
4701 /* Configure vlan filter on or off */
4702 int
4703 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
4704 {
4705         int i, num;
4706         struct i40e_mac_filter *f;
4707         void *temp;
4708         struct i40e_mac_filter_info *mac_filter;
4709         enum rte_mac_filter_type desired_filter;
4710         int ret = I40E_SUCCESS;
4711
4712         if (on) {
4713                 /* Filter to match MAC and VLAN */
4714                 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
4715         } else {
4716                 /* Filter to match only MAC */
4717                 desired_filter = RTE_MAC_PERFECT_MATCH;
4718         }
4719
4720         num = vsi->mac_num;
4721
4722         mac_filter = rte_zmalloc("mac_filter_info_data",
4723                                  num * sizeof(*mac_filter), 0);
4724         if (mac_filter == NULL) {
4725                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4726                 return I40E_ERR_NO_MEMORY;
4727         }
4728
4729         i = 0;
4730
4731         /* Remove all existing MAC filters */
4732         TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
4733                 mac_filter[i] = f->mac_info;
4734                 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
4735                 if (ret) {
4736                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4737                                     on ? "enable" : "disable");
4738                         goto DONE;
4739                 }
4740                 i++;
4741         }
4742
4743         /* Re-add all filters with the desired filter type */
4744         for (i = 0; i < num; i++) {
4745                 mac_filter[i].filter_type = desired_filter;
4746                 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
4747                 if (ret) {
4748                         PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4749                                     on ? "enable" : "disable");
4750                         goto DONE;
4751                 }
4752         }
4753
4754 DONE:
4755         rte_free(mac_filter);
4756         return ret;
4757 }
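/*
 * Editor's note (illustrative): the filter type is switched above by
 * snapshotting every MAC filter of the VSI, deleting them, and re-adding
 * each as either a MAC+VLAN perfect-match filter (VLAN filtering on) or a
 * MAC-only perfect-match filter (VLAN filtering off).
 */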
4758
4759 /* Configure vlan stripping on or off */
4760 int
4761 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
4762 {
4763         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4764         struct i40e_vsi_context ctxt;
4765         uint8_t vlan_flags;
4766         int ret = I40E_SUCCESS;
4767
4768         /* Check if stripping is already on or off */
4769         if (vsi->info.valid_sections &
4770                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
4771                 if (on) {
4772                         if ((vsi->info.port_vlan_flags &
4773                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
4774                                 return 0; /* already on */
4775                 } else {
4776                         if ((vsi->info.port_vlan_flags &
4777                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
4778                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
4779                                 return 0; /* already off */
4780                 }
4781         }
4782
4783         if (on)
4784                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4785         else
4786                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
4787         vsi->info.valid_sections =
4788                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4789         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
4790         vsi->info.port_vlan_flags |= vlan_flags;
4791         ctxt.seid = vsi->seid;
4792         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4793         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4794         if (ret)
4795                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
4796                             on ? "enable" : "disable");
4797
4798         return ret;
4799 }
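/*
 * Editor's note (illustrative): stripping is controlled by the 2-bit EMOD
 * field in port_vlan_flags: I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH (0 under the
 * EMOD mask) strips the VLAN tag, while I40E_AQ_VSI_PVLAN_EMOD_NOTHING
 * (the full mask) leaves it in the packet; the early-return checks above
 * rely on exactly those two encodings.
 */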
4800
4801 static int
4802 i40e_dev_init_vlan(struct rte_eth_dev *dev)
4803 {
4804         struct rte_eth_dev_data *data = dev->data;
4805         int ret;
4806         int mask = 0;
4807
4808         /* Apply vlan offload setting */
4809         mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
4810         i40e_vlan_offload_set(dev, mask);
4811
4812         /* Apply double-vlan setting, not implemented yet */
4813
4814         /* Apply pvid setting */
4815         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
4816                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
4817         if (ret)
4818                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
4819
4820         return ret;
4821 }
4822
4823 static int
4824 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
4825 {
4826         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4827
4828         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
4829 }
4830
4831 static int
4832 i40e_update_flow_control(struct i40e_hw *hw)
4833 {
4834 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
4835         struct i40e_link_status link_status;
4836         uint32_t rxfc = 0, txfc = 0, reg;
4837         uint8_t an_info;
4838         int ret;
4839
4840         memset(&link_status, 0, sizeof(link_status));
4841         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
4842         if (ret != I40E_SUCCESS) {
4843                 PMD_DRV_LOG(ERR, "Failed to get link status information");
4844                 goto write_reg; /* Disable flow control */
4845         }
4846
4847         an_info = hw->phy.link_info.an_info;
4848         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
4849                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
4850                 ret = I40E_ERR_NOT_READY;
4851                 goto write_reg; /* Disable flow control */
4852         }
4853         /**
4854          * If link auto negotiation is enabled, flow control needs to
4855          * be configured according to it
4856          */
4857         switch (an_info & I40E_LINK_PAUSE_RXTX) {
4858         case I40E_LINK_PAUSE_RXTX:
4859                 rxfc = 1;
4860                 txfc = 1;
4861                 hw->fc.current_mode = I40E_FC_FULL;
4862                 break;
4863         case I40E_AQ_LINK_PAUSE_RX:
4864                 rxfc = 1;
4865                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
4866                 break;
4867         case I40E_AQ_LINK_PAUSE_TX:
4868                 txfc = 1;
4869                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
4870                 break;
4871         default:
4872                 hw->fc.current_mode = I40E_FC_NONE;
4873                 break;
4874         }
4875
4876 write_reg:
4877         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
4878                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
4879         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4880         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
4881         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
4882         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
4883
4884         return ret;
4885 }
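/*
 * Editor's note (illustrative): the switch above decodes the negotiated
 * pause bits into the four 802.3x flow-control modes (full, RX-only,
 * TX-only, none); the decoded mode is then mirrored into the registers:
 * FCCFG carries the TX pause enable, MFLCN the RX pause enable.
 */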
4886
4887 /* PF setup */
4888 static int
4889 i40e_pf_setup(struct i40e_pf *pf)
4890 {
4891         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4892         struct i40e_filter_control_settings settings;
4893         struct i40e_vsi *vsi;
4894         int ret;
4895
4896         /* Clear all stats counters */
4897         pf->offset_loaded = FALSE;
4898         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
4899         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
4900
4901         ret = i40e_pf_get_switch_config(pf);
4902         if (ret != I40E_SUCCESS) {
4903                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
4904                 return ret;
4905         }
4906         if (pf->flags & I40E_FLAG_FDIR) {
4907                 /* Allocate the queue first so that FDIR uses queue pair 0 */
4908                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
4909                 if (ret != I40E_FDIR_QUEUE_ID) {
4910                         PMD_DRV_LOG(ERR, "queue allocation fails for FDIR:"
4911                                     " ret=%d", ret);
4912                         pf->flags &= ~I40E_FLAG_FDIR;
4913                 }
4914         }
4915         /* Main VSI setup */
4916         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
4917         if (!vsi) {
4918                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
4919                 return I40E_ERR_NOT_READY;
4920         }
4921         pf->main_vsi = vsi;
4922
4923         /* Configure filter control */
4924         memset(&settings, 0, sizeof(settings));
4925         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
4926                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
4927         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
4928                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
4929         else {
4930                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
4931                                                 hw->func_caps.rss_table_size);
4932                 return I40E_ERR_PARAM;
4933         }
4934         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
4935                         "size: %u", hw->func_caps.rss_table_size);
4936         pf->hash_lut_size = hw->func_caps.rss_table_size;
4937
4938         /* Enable ethtype and macvlan filters */
4939         settings.enable_ethtype = TRUE;
4940         settings.enable_macvlan = TRUE;
4941         ret = i40e_set_filter_control(hw, &settings);
4942         if (ret)
4943                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
4944                                                                 ret);
4945
4946         /* Update flow control according to the auto negotiation */
4947         i40e_update_flow_control(hw);
4948
4949         return I40E_SUCCESS;
4950 }
4951
4952 int
4953 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4954 {
4955         uint32_t reg;
4956         uint16_t j;
4957
4958         /**
4959          * Set or clear TX Queue Disable flags,
4960          * which is required by hardware.
4961          */
4962         i40e_pre_tx_queue_cfg(hw, q_idx, on);
4963         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
4964
4965         /* Wait until the request is finished */
4966         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4967                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4968                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4969                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4970                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
4971                                                         & 0x1))) {
4972                         break;
4973                 }
4974         }
4975         if (on) {
4976                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4977                         return I40E_SUCCESS; /* already on, skip next steps */
4978
4979                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
4980                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4981         } else {
4982                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4983                         return I40E_SUCCESS; /* already off, skip next steps */
4984                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4985         }
4986         /* Write the register */
4987         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
4988         /* Check the result */
4989         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4990                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4991                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4992                 if (on) {
4993                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4994                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
4995                                 break;
4996                 } else {
4997                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4998                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4999                                 break;
5000                 }
5001         }
5002         /* Check if it is timeout */
5003         if (j >= I40E_CHK_Q_ENA_COUNT) {
5004                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5005                             (on ? "enable" : "disable"), q_idx);
5006                 return I40E_ERR_TIMEOUT;
5007         }
5008
5009         return I40E_SUCCESS;
5010 }
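/*
 * Editor's sketch (not part of the driver): the enable/disable pattern above
 * is "write the REQ bit, then poll until the STAT bit follows". A simplified
 * generic model of the same technique (names hypothetical):
 */
#if 0 /* illustrative only */
static int
poll_req_stat(volatile uint32_t *ena_reg, uint32_t req_mask,
	      uint32_t stat_mask, int on, int tries, int interval_us)
{
	uint32_t reg;

	if (on)
		*ena_reg |= req_mask;
	else
		*ena_reg &= ~req_mask;
	while (tries-- > 0) {
		rte_delay_us(interval_us);
		reg = *ena_reg;
		if (on ? !!(reg & stat_mask) : !(reg & stat_mask))
			return 0;       /* hardware caught up with request */
	}
	return -1;                      /* timeout */
}
#endif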
5011
5012 /* Switch on or off the tx queues */
5013 static int
5014 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5015 {
5016         struct rte_eth_dev_data *dev_data = pf->dev_data;
5017         struct i40e_tx_queue *txq;
5018         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5019         uint16_t i;
5020         int ret;
5021
5022         for (i = 0; i < dev_data->nb_tx_queues; i++) {
5023                 txq = dev_data->tx_queues[i];
5024                 /* Don't operate the queue if it is not configured, or
5025                  * if it is to be started per-queue (deferred start) */
5026                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5027                         continue;
5028                 if (on)
5029                         ret = i40e_dev_tx_queue_start(dev, i);
5030                 else
5031                         ret = i40e_dev_tx_queue_stop(dev, i);
5032                 if (ret != I40E_SUCCESS)
5033                         return ret;
5034         }
5035
5036         return I40E_SUCCESS;
5037 }
5038
5039 int
5040 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5041 {
5042         uint32_t reg;
5043         uint16_t j;
5044
5045         /* Wait until the request is finished */
5046         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5047                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5048                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5049                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5050                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5051                         break;
5052         }
5053
5054         if (on) {
5055                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5056                         return I40E_SUCCESS; /* Already on, skip next steps */
5057                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5058         } else {
5059                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5060                         return I40E_SUCCESS; /* Already off, skip next steps */
5061                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5062         }
5063
5064         /* Write the register */
5065         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5066         /* Check the result */
5067         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5068                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5069                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5070                 if (on) {
5071                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5072                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
5073                                 break;
5074                 } else {
5075                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5076                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5077                                 break;
5078                 }
5079         }
5080
5081         /* Check if it is timeout */
5082         if (j >= I40E_CHK_Q_ENA_COUNT) {
5083                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5084                             (on ? "enable" : "disable"), q_idx);
5085                 return I40E_ERR_TIMEOUT;
5086         }
5087
5088         return I40E_SUCCESS;
5089 }
5090 /* Switch on or off the rx queues */
5091 static int
5092 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5093 {
5094         struct rte_eth_dev_data *dev_data = pf->dev_data;
5095         struct i40e_rx_queue *rxq;
5096         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5097         uint16_t i;
5098         int ret;
5099
5100         for (i = 0; i < dev_data->nb_rx_queues; i++) {
5101                 rxq = dev_data->rx_queues[i];
5102                 /* Don't operate the queue if it is not configured, or
5103                  * if it is to be started per-queue (deferred start) */
5104                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5105                         continue;
5106                 if (on)
5107                         ret = i40e_dev_rx_queue_start(dev, i);
5108                 else
5109                         ret = i40e_dev_rx_queue_stop(dev, i);
5110                 if (ret != I40E_SUCCESS)
5111                         return ret;
5112         }
5113
5114         return I40E_SUCCESS;
5115 }
5116
5117 /* Switch on or off all the rx/tx queues */
5118 int
5119 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5120 {
5121         int ret;
5122
5123         if (on) {
5124                 /* enable rx queues before enabling tx queues */
5125                 ret = i40e_dev_switch_rx_queues(pf, on);
5126                 if (ret) {
5127                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5128                         return ret;
5129                 }
5130                 ret = i40e_dev_switch_tx_queues(pf, on);
5131         } else {
5132                 /* Stop tx queues before stopping rx queues */
5133                 ret = i40e_dev_switch_tx_queues(pf, on);
5134                 if (ret) {
5135                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5136                         return ret;
5137                 }
5138                 ret = i40e_dev_switch_rx_queues(pf, on);
5139         }
5140
5141         return ret;
5142 }
5143
5144 /* Initialize VSI for TX */
5145 static int
5146 i40e_dev_tx_init(struct i40e_pf *pf)
5147 {
5148         struct rte_eth_dev_data *data = pf->dev_data;
5149         uint16_t i;
5150         int ret = I40E_SUCCESS;
5151         struct i40e_tx_queue *txq;
5152
5153         for (i = 0; i < data->nb_tx_queues; i++) {
5154                 txq = data->tx_queues[i];
5155                 if (!txq || !txq->q_set)
5156                         continue;
5157                 ret = i40e_tx_queue_init(txq);
5158                 if (ret != I40E_SUCCESS)
5159                         break;
5160         }
5161         if (ret == I40E_SUCCESS)
5162                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5163                                      ->eth_dev);
5164
5165         return ret;
5166 }
5167
5168 /* Initialize VSI for RX */
5169 static int
5170 i40e_dev_rx_init(struct i40e_pf *pf)
5171 {
5172         struct rte_eth_dev_data *data = pf->dev_data;
5173         int ret = I40E_SUCCESS;
5174         uint16_t i;
5175         struct i40e_rx_queue *rxq;
5176
5177         i40e_pf_config_mq_rx(pf);
5178         for (i = 0; i < data->nb_rx_queues; i++) {
5179                 rxq = data->rx_queues[i];
5180                 if (!rxq || !rxq->q_set)
5181                         continue;
5182
5183                 ret = i40e_rx_queue_init(rxq);
5184                 if (ret != I40E_SUCCESS) {
5185                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
5186                                     "initialization");
5187                         break;
5188                 }
5189         }
5190         if (ret == I40E_SUCCESS)
5191                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5192                                      ->eth_dev);
5193
5194         return ret;
5195 }
5196
5197 static int
5198 i40e_dev_rxtx_init(struct i40e_pf *pf)
5199 {
5200         int err;
5201
5202         err = i40e_dev_tx_init(pf);
5203         if (err) {
5204                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5205                 return err;
5206         }
5207         err = i40e_dev_rx_init(pf);
5208         if (err) {
5209                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5210                 return err;
5211         }
5212
5213         return err;
5214 }
5215
5216 static int
5217 i40e_vmdq_setup(struct rte_eth_dev *dev)
5218 {
5219         struct rte_eth_conf *conf = &dev->data->dev_conf;
5220         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5221         int i, err, conf_vsis, j, loop;
5222         struct i40e_vsi *vsi;
5223         struct i40e_vmdq_info *vmdq_info;
5224         struct rte_eth_vmdq_rx_conf *vmdq_conf;
5225         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5226
5227         /*
5228          * Disable interrupts to avoid messages from VFs. Furthermore, this
5229          * avoids race conditions in VSI creation/destruction.
5230          */
5231         i40e_pf_disable_irq0(hw);
5232
5233         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5234                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5235                 return -ENOTSUP;
5236         }
5237
5238         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5239         if (conf_vsis > pf->max_nb_vmdq_vsi) {
5240                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
5241                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5242                         pf->max_nb_vmdq_vsi);
5243                 return -ENOTSUP;
5244         }
5245
5246         if (pf->vmdq != NULL) {
5247                 PMD_INIT_LOG(INFO, "VMDQ already configured");
5248                 return 0;
5249         }
5250
5251         pf->vmdq = rte_zmalloc("vmdq_info_struct",
5252                                 sizeof(*vmdq_info) * conf_vsis, 0);
5253
5254         if (pf->vmdq == NULL) {
5255                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
5256                 return -ENOMEM;
5257         }
5258
5259         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5260
5261         /* Create VMDQ VSI */
5262         for (i = 0; i < conf_vsis; i++) {
5263                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5264                                 vmdq_conf->enable_loop_back);
5265                 if (vsi == NULL) {
5266                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5267                         err = -1;
5268                         goto err_vsi_setup;
5269                 }
5270                 vmdq_info = &pf->vmdq[i];
5271                 vmdq_info->pf = pf;
5272                 vmdq_info->vsi = vsi;
5273         }
5274         pf->nb_cfg_vmdq_vsi = conf_vsis;
5275
5276         /* Configure Vlan */
5277         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
5278         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5279                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5280                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5281                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5282                                         vmdq_conf->pool_map[i].vlan_id, j);
5283
5284                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5285                                                 vmdq_conf->pool_map[i].vlan_id);
5286                                 if (err) {
5287                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
5288                                         err = -1;
5289                                         goto err_vsi_setup;
5290                                 }
5291                         }
5292                 }
5293         }
5294
5295         i40e_pf_enable_irq0(hw);
5296
5297         return 0;
5298
5299 err_vsi_setup:
5300         for (i = 0; i < conf_vsis; i++) {
5301                 if (pf->vmdq[i].vsi == NULL)
5302                         break;
5303                 i40e_vsi_release(pf->vmdq[i].vsi);
5304         }
5305
5306         rte_free(pf->vmdq);
5307         pf->vmdq = NULL;
5308         i40e_pf_enable_irq0(hw);
5309         return err;
5310 }
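
/*
 * Illustrative sketch, not upstream code: a minimal application-side
 * configuration that would exercise i40e_vmdq_setup(). The pool count
 * and vlan id below are arbitrary placeholders.
 */
static inline void
i40e_example_vmdq_conf(struct rte_eth_conf *conf)
{
	struct rte_eth_vmdq_rx_conf *vmdq = &conf->rx_adv_conf.vmdq_rx_conf;

	vmdq->nb_queue_pools = ETH_8_POOLS;	/* 8 VMDQ VSIs */
	vmdq->enable_loop_back = 0;
	vmdq->nb_pool_maps = 1;
	vmdq->pool_map[0].vlan_id = 100;	/* steer vlan 100 ... */
	vmdq->pool_map[0].pools = 1ULL << 0;	/* ... to pool 0 only */
}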
5311
5312 static void
5313 i40e_stat_update_32(struct i40e_hw *hw,
5314                    uint32_t reg,
5315                    bool offset_loaded,
5316                    uint64_t *offset,
5317                    uint64_t *stat)
5318 {
5319         uint64_t new_data;
5320
5321         new_data = (uint64_t)I40E_READ_REG(hw, reg);
5322         if (!offset_loaded)
5323                 *offset = new_data;
5324
5325         if (new_data >= *offset)
5326                 *stat = (uint64_t)(new_data - *offset);
5327         else
5328                 *stat = (uint64_t)((new_data +
5329                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5330 }
5331
5332 static void
5333 i40e_stat_update_48(struct i40e_hw *hw,
5334                    uint32_t hireg,
5335                    uint32_t loreg,
5336                    bool offset_loaded,
5337                    uint64_t *offset,
5338                    uint64_t *stat)
5339 {
5340         uint64_t new_data;
5341
5342         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5343         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5344                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5345
5346         if (!offset_loaded)
5347                 *offset = new_data;
5348
5349         if (new_data >= *offset)
5350                 *stat = new_data - *offset;
5351         else
5352                 *stat = (uint64_t)((new_data +
5353                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5354
5355         *stat &= I40E_48_BIT_MASK;
5356 }
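
/*
 * Worked example, illustrative only: the hardware counters are 32 or 48
 * bits wide and wrap, so deltas are taken modulo the counter width. If a
 * 48-bit counter last read 0xFFFFFFFFFFF0 and now reads 0x10, the true
 * increment is:
 *   (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20
 */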
5357
5358 /* Disable IRQ0 */
5359 void
5360 i40e_pf_disable_irq0(struct i40e_hw *hw)
5361 {
5362         /* Disable all interrupt types */
5363         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
5364         I40E_WRITE_FLUSH(hw);
5365 }
5366
5367 /* Enable IRQ0 */
5368 void
5369 i40e_pf_enable_irq0(struct i40e_hw *hw)
5370 {
5371         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
5372                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
5373                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
5374                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
5375         I40E_WRITE_FLUSH(hw);
5376 }
5377
5378 static void
5379 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
5380 {
5381         /* read pending request and disable first */
5382         i40e_pf_disable_irq0(hw);
5383         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
5384         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
5385                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
5386
5387         if (no_queue)
5388                 /* Link no queues with irq0 */
5389                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
5390                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
5391 }
5392
5393 static void
5394 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
5395 {
5396         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5397         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5398         int i;
5399         uint16_t abs_vf_id;
5400         uint32_t index, offset, val;
5401
5402         if (!pf->vfs)
5403                 return;
5404         /**
5405          * Try to find which VF triggered a reset. Use the absolute VF id to
5406          * access it, since the register is a global one.
5407          */
5408         for (i = 0; i < pf->vf_num; i++) {
5409                 abs_vf_id = hw->func_caps.vf_base_id + i;
5410                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
5411                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
5412                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
5413                 /* VFR event occurred */
5414                 if (val & (0x1 << offset)) {
5415                         int ret;
5416
5417                         /* Clear the event first */
5418                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
5419                                                         (0x1 << offset));
5420                         PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
5421                         /**
5422                          * Only notify that a VF reset event occurred;
5423                          * don't trigger another SW reset.
5424                          */
5425                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
5426                         if (ret != I40E_SUCCESS)
5427                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
5428                 }
5429         }
5430 }
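
/*
 * Worked example, illustrative only: locating a VF's bit in the global
 * VFLRSTAT array. With vf_base_id 64 and i == 3, the absolute VF id is
 * 67, which lives in I40E_GLGEN_VFLRSTAT(67 / 32 == 2) at bit offset
 * 67 % 32 == 3.
 */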
5431
5432 static void
5433 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
5434 {
5435         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5436         struct i40e_arq_event_info info;
5437         uint16_t pending, opcode;
5438         int ret;
5439
5440         info.buf_len = I40E_AQ_BUF_SZ;
5441         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
5442         if (!info.msg_buf) {
5443                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
5444                 return;
5445         }
5446
5447         pending = 1;
5448         while (pending) {
5449                 ret = i40e_clean_arq_element(hw, &info, &pending);
5450
5451                 if (ret != I40E_SUCCESS) {
5452                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
5453                                     "aq_err: %u", hw->aq.asq_last_status);
5454                         break;
5455                 }
5456                 opcode = rte_le_to_cpu_16(info.desc.opcode);
5457
5458                 switch (opcode) {
5459                 case i40e_aqc_opc_send_msg_to_pf:
5460                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
5461                         i40e_pf_host_handle_vf_msg(dev,
5462                                         rte_le_to_cpu_16(info.desc.retval),
5463                                         rte_le_to_cpu_32(info.desc.cookie_high),
5464                                         rte_le_to_cpu_32(info.desc.cookie_low),
5465                                         info.msg_buf,
5466                                         info.msg_len);
5467                         break;
5468                 default:
5469                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
5470                                     opcode);
5471                         break;
5472                 }
5473         }
5474         rte_free(info.msg_buf);
5475 }
5476
5477 /*
5478  * This handler is registered as an alarm callback so the LSC interrupt is
5479  * serviced after a fixed delay, giving the NIC time to settle into a
5480  * stable state. Currently i40e waits 1 second for the link-up interrupt;
5481  * no wait is needed for link down.
5482  */
5483 static void
5484 i40e_dev_interrupt_delayed_handler(void *param)
5485 {
5486         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5487         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5488         uint32_t icr0;
5489
5490         /* read interrupt causes again */
5491         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5492
5493 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
5494         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
5495                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
5496         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
5497                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
5498         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
5499                 PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
5500         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
5501                 PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
5502         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
5503                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
5504                                                                 "state\n");
5505         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
5506                 PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
5507         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
5508                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
5509 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
5510
5511         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
5512                 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
5513                 i40e_dev_handle_vfr_event(dev);
5514         }
5515         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
5516                 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
5517                 i40e_dev_handle_aq_msg(dev);
5518         }
5519
5520         /* handle the link up interrupt in an alarm callback */
5521         i40e_dev_link_update(dev, 0);
5522         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
5523
5524         i40e_pf_enable_irq0(hw);
5525         rte_intr_enable(&(dev->pci_dev->intr_handle));
5526 }
5527
5528 /**
5529  * Interrupt handler triggered by the NIC for handling
5530  * specific interrupts.
5531  *
5532  * @param handle
5533  *  Pointer to interrupt handle.
5534  * @param param
5535  *  The address of the parameter (struct rte_eth_dev *) registered before.
5536  *
5537  * @return
5538  *  void
5539  */
5540 static void
5541 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
5542                            void *param)
5543 {
5544         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5545         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5546         uint32_t icr0;
5547
5548         /* Disable interrupt */
5549         i40e_pf_disable_irq0(hw);
5550
5551         /* read out interrupt causes */
5552         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5553
5554         /* No interrupt event indicated */
5555         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
5556                 PMD_DRV_LOG(INFO, "No interrupt event");
5557                 goto done;
5558         }
5559 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
5560         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
5561                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
5562         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
5563                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
5564         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
5565                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
5566         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
5567                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
5568         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
5569                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
5570         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
5571                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
5572         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
5573                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
5574 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
5575
5576         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
5577                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
5578                 i40e_dev_handle_vfr_event(dev);
5579         }
5580         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
5581                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
5582                 i40e_dev_handle_aq_msg(dev);
5583         }
5584
5585         /* Link Status Change interrupt */
5586         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
5587 #define I40E_US_PER_SECOND 1000000
5588                 struct rte_eth_link link;
5589
5590                 PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
5591                 memset(&link, 0, sizeof(link));
5592                 rte_i40e_dev_atomic_read_link_status(dev, &link);
5593                 i40e_dev_link_update(dev, 0);
5594
5595                 /*
5596                  * For link up interrupt, it needs to wait 1 second to let the
5597                  * hardware be a stable state. Otherwise several consecutive
5598                  * interrupts can be observed.
5599                  * For link down interrupt, no need to wait.
5600                  */
5601                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
5602                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
5603                         return;
5604                 else
5605                         _rte_eth_dev_callback_process(dev,
5606                                 RTE_ETH_EVENT_INTR_LSC, NULL);
5607         }
5608
5609 done:
5610         /* Enable interrupt */
5611         i40e_pf_enable_irq0(hw);
5612         rte_intr_enable(&(dev->pci_dev->intr_handle));
5613 }
5614
5615 static int
5616 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
5617                          struct i40e_macvlan_filter *filter,
5618                          int total)
5619 {
5620         int ele_num, ele_buff_size;
5621         int num, actual_num, i;
5622         uint16_t flags;
5623         int ret = I40E_SUCCESS;
5624         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5625         struct i40e_aqc_add_macvlan_element_data *req_list;
5626
5627         if (filter == NULL || total == 0)
5628                 return I40E_ERR_PARAM;
5629         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5630         ele_buff_size = hw->aq.asq_buf_size;
5631
5632         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5633         if (req_list == NULL) {
5634                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5635                 return I40E_ERR_NO_MEMORY;
5636         }
5637
5638         num = 0;
5639         do {
5640                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5641                 memset(req_list, 0, ele_buff_size);
5642
5643                 for (i = 0; i < actual_num; i++) {
5644                         (void)rte_memcpy(req_list[i].mac_addr,
5645                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5646                         req_list[i].vlan_tag =
5647                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5648
5649                         switch (filter[num + i].filter_type) {
5650                         case RTE_MAC_PERFECT_MATCH:
5651                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5652                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5653                                 break;
5654                         case RTE_MACVLAN_PERFECT_MATCH:
5655                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5656                                 break;
5657                         case RTE_MAC_HASH_MATCH:
5658                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5659                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5660                                 break;
5661                         case RTE_MACVLAN_HASH_MATCH:
5662                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5663                                 break;
5664                         default:
5665                                 PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
5666                                 ret = I40E_ERR_PARAM;
5667                                 goto DONE;
5668                         }
5669
5670                         req_list[i].queue_number = 0;
5671
5672                         req_list[i].flags = rte_cpu_to_le_16(flags);
5673                 }
5674
5675                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
5676                                                 actual_num, NULL);
5677                 if (ret != I40E_SUCCESS) {
5678                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
5679                         goto DONE;
5680                 }
5681                 num += actual_num;
5682         } while (num < total);
5683
5684 DONE:
5685         rte_free(req_list);
5686         return ret;
5687 }
5688
5689 static int
5690 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
5691                             struct i40e_macvlan_filter *filter,
5692                             int total)
5693 {
5694         int ele_num, ele_buff_size;
5695         int num, actual_num, i;
5696         uint16_t flags;
5697         int ret = I40E_SUCCESS;
5698         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5699         struct i40e_aqc_remove_macvlan_element_data *req_list;
5700
5701         if (filter == NULL || total == 0)
5702                 return I40E_ERR_PARAM;
5703
5704         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5705         ele_buff_size = hw->aq.asq_buf_size;
5706
5707         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
5708         if (req_list == NULL) {
5709                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
5710                 return I40E_ERR_NO_MEMORY;
5711         }
5712
5713         num = 0;
5714         do {
5715                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5716                 memset(req_list, 0, ele_buff_size);
5717
5718                 for (i = 0; i < actual_num; i++) {
5719                         (void)rte_memcpy(req_list[i].mac_addr,
5720                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5721                         req_list[i].vlan_tag =
5722                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5723
5724                         switch (filter[num + i].filter_type) {
5725                         case RTE_MAC_PERFECT_MATCH:
5726                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5727                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5728                                 break;
5729                         case RTE_MACVLAN_PERFECT_MATCH:
5730                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
5731                                 break;
5732                         case RTE_MAC_HASH_MATCH:
5733                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
5734                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5735                                 break;
5736                         case RTE_MACVLAN_HASH_MATCH:
5737                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
5738                                 break;
5739                         default:
5740                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
5741                                 ret = I40E_ERR_PARAM;
5742                                 goto DONE;
5743                         }
5744                         req_list[i].flags = rte_cpu_to_le_16(flags);
5745                 }
5746
5747                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
5748                                                 actual_num, NULL);
5749                 if (ret != I40E_SUCCESS) {
5750                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
5751                         goto DONE;
5752                 }
5753                 num += actual_num;
5754         } while (num < total);
5755
5756 DONE:
5757         rte_free(req_list);
5758         return ret;
5759 }
5760
5761 /* Find a specific MAC filter */
5762 static struct i40e_mac_filter *
5763 i40e_find_mac_filter(struct i40e_vsi *vsi,
5764                          struct ether_addr *macaddr)
5765 {
5766         struct i40e_mac_filter *f;
5767
5768         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5769                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
5770                         return f;
5771         }
5772
5773         return NULL;
5774 }
5775
5776 static bool
5777 i40e_find_vlan_filter(struct i40e_vsi *vsi,
5778                          uint16_t vlan_id)
5779 {
5780         uint32_t vid_idx, vid_bit;
5781
5782         if (vlan_id > ETH_VLAN_ID_MAX)
5783                 return 0;
5784
5785         vid_idx = I40E_VFTA_IDX(vlan_id);
5786         vid_bit = I40E_VFTA_BIT(vlan_id);
5787
5788         if (vsi->vfta[vid_idx] & vid_bit)
5789                 return 1;
5790         else
5791                 return 0;
5792 }
5793
5794 static void
5795 i40e_set_vlan_filter(struct i40e_vsi *vsi,
5796                          uint16_t vlan_id, bool on)
5797 {
5798         uint32_t vid_idx, vid_bit;
5799
5800         if (vlan_id > ETH_VLAN_ID_MAX)
5801                 return;
5802
5803         vid_idx = I40E_VFTA_IDX(vlan_id);
5804         vid_bit = I40E_VFTA_BIT(vlan_id);
5805
5806         if (on)
5807                 vsi->vfta[vid_idx] |= vid_bit;
5808         else
5809                 vsi->vfta[vid_idx] &= ~vid_bit;
5810 }
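
/*
 * Worked example, illustrative only: the software VFTA is an array of
 * 32-bit words with one bit per vlan id. For vlan_id 100:
 *   vid_idx = 100 / 32 = 3                (I40E_VFTA_IDX)
 *   vid_bit = 1 << (100 % 32) = 1 << 4    (I40E_VFTA_BIT)
 * so vlan 100 is tracked as bit 4 of vsi->vfta[3].
 */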
5811
5812 /**
5813  * Find all vlans configured for a specific mac addr;
5814  * return with the actual vlans found filled into mv_f.
5815  */
5816 static inline int
5817 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
5818                            struct i40e_macvlan_filter *mv_f,
5819                            int num, struct ether_addr *addr)
5820 {
5821         int i;
5822         uint32_t j, k;
5823
5824         /**
5825          * Walk the VFTA directly instead of calling i40e_find_vlan_filter()
5826          * per vlan, to reduce the loop time, although the code looks complex.
5827          */
5828         if (num < vsi->vlan_num)
5829                 return I40E_ERR_PARAM;
5830
5831         i = 0;
5832         for (j = 0; j < I40E_VFTA_SIZE; j++) {
5833                 if (vsi->vfta[j]) {
5834                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
5835                                 if (vsi->vfta[j] & (1 << k)) {
5836                                         if (i > num - 1) {
5837                                                 PMD_DRV_LOG(ERR, "vlan number "
5838                                                             "not match");
5839                                                 return I40E_ERR_PARAM;
5840                                         }
5841                                         (void)rte_memcpy(&mv_f[i].macaddr,
5842                                                         addr, ETH_ADDR_LEN);
5843                                         mv_f[i].vlan_id =
5844                                                 j * I40E_UINT32_BIT_SIZE + k;
5845                                         i++;
5846                                 }
5847                         }
5848                 }
5849         }
5850         return I40E_SUCCESS;
5851 }
5852
5853 static inline int
5854 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
5855                            struct i40e_macvlan_filter *mv_f,
5856                            int num,
5857                            uint16_t vlan)
5858 {
5859         int i = 0;
5860         struct i40e_mac_filter *f;
5861
5862         if (num < vsi->mac_num)
5863                 return I40E_ERR_PARAM;
5864
5865         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5866                 if (i > num - 1) {
5867                         PMD_DRV_LOG(ERR, "buffer number not match");
5868                         return I40E_ERR_PARAM;
5869                 }
5870                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5871                                 ETH_ADDR_LEN);
5872                 mv_f[i].vlan_id = vlan;
5873                 mv_f[i].filter_type = f->mac_info.filter_type;
5874                 i++;
5875         }
5876
5877         return I40E_SUCCESS;
5878 }
5879
5880 static int
5881 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
5882 {
5883         int i, num;
5884         struct i40e_mac_filter *f;
5885         struct i40e_macvlan_filter *mv_f;
5886         int ret = I40E_SUCCESS;
5887
5888         if (vsi == NULL || vsi->mac_num == 0)
5889                 return I40E_ERR_PARAM;
5890
5891         /* Case where no vlan is set */
5892         if (vsi->vlan_num == 0)
5893                 num = vsi->mac_num;
5894         else
5895                 num = vsi->mac_num * vsi->vlan_num;
5896
5897         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
5898         if (mv_f == NULL) {
5899                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5900                 return I40E_ERR_NO_MEMORY;
5901         }
5902
5903         i = 0;
5904         if (vsi->vlan_num == 0) {
5905                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5906                         (void)rte_memcpy(&mv_f[i].macaddr,
5907                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
5908                         mv_f[i].vlan_id = 0;
5909                         i++;
5910                 }
5911         } else {
5912                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5913                         ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
5914                                         vsi->vlan_num, &f->mac_info.mac_addr);
5915                         if (ret != I40E_SUCCESS)
5916                                 goto DONE;
5917                         i += vsi->vlan_num;
5918                 }
5919         }
5920
5921         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
5922 DONE:
5923         rte_free(mv_f);
5924
5925         return ret;
5926 }
5927
5928 int
5929 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5930 {
5931         struct i40e_macvlan_filter *mv_f;
5932         int mac_num;
5933         int ret = I40E_SUCCESS;
5934
5935         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
5936                 return I40E_ERR_PARAM;
5937
5938         /* If it's already set, just return */
5939         if (i40e_find_vlan_filter(vsi, vlan))
5940                 return I40E_SUCCESS;
5941
5942         mac_num = vsi->mac_num;
5943
5944         if (mac_num == 0) {
5945                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5946                 return I40E_ERR_PARAM;
5947         }
5948
5949         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5950
5951         if (mv_f == NULL) {
5952                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5953                 return I40E_ERR_NO_MEMORY;
5954         }
5955
5956         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5957
5958         if (ret != I40E_SUCCESS)
5959                 goto DONE;
5960
5961         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5962
5963         if (ret != I40E_SUCCESS)
5964                 goto DONE;
5965
5966         i40e_set_vlan_filter(vsi, vlan, 1);
5967
5968         vsi->vlan_num++;
5969         ret = I40E_SUCCESS;
5970 DONE:
5971         rte_free(mv_f);
5972         return ret;
5973 }
5974
5975 int
5976 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5977 {
5978         struct i40e_macvlan_filter *mv_f;
5979         int mac_num;
5980         int ret = I40E_SUCCESS;
5981
5982         /**
5983          * Vlan 0 is the generic filter for untagged packets
5984          * and can't be removed.
5985          */
5986         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
5987                 return I40E_ERR_PARAM;
5988
5989         /* If it can't be found, just return */
5990         if (!i40e_find_vlan_filter(vsi, vlan))
5991                 return I40E_ERR_PARAM;
5992
5993         mac_num = vsi->mac_num;
5994
5995         if (mac_num == 0) {
5996                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5997                 return I40E_ERR_PARAM;
5998         }
5999
6000         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6001
6002         if (mv_f == NULL) {
6003                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6004                 return I40E_ERR_NO_MEMORY;
6005         }
6006
6007         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6008
6009         if (ret != I40E_SUCCESS)
6010                 goto DONE;
6011
6012         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6013
6014         if (ret != I40E_SUCCESS)
6015                 goto DONE;
6016
6017         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
6018         if (vsi->vlan_num == 1) {
6019                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6020                 if (ret != I40E_SUCCESS)
6021                         goto DONE;
6022
6023                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6024                 if (ret != I40E_SUCCESS)
6025                         goto DONE;
6026         }
6027
6028         i40e_set_vlan_filter(vsi, vlan, 0);
6029
6030         vsi->vlan_num--;
6031         ret = I40E_SUCCESS;
6032 DONE:
6033         rte_free(mv_f);
6034         return ret;
6035 }
6036
6037 int
6038 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6039 {
6040         struct i40e_mac_filter *f;
6041         struct i40e_macvlan_filter *mv_f;
6042         int i, vlan_num = 0;
6043         int ret = I40E_SUCCESS;
6044
6045         /* If the filter has already been configured, just return */
6046         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6047         if (f != NULL)
6048                 return I40E_SUCCESS;
6049         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6050                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6051
6052                 /**
6053                  * If vlan_num is 0, this is the first time to add a mac;
6054                  * set the mask for vlan_id 0.
6055                  */
6056                 if (vsi->vlan_num == 0) {
6057                         i40e_set_vlan_filter(vsi, 0, 1);
6058                         vsi->vlan_num = 1;
6059                 }
6060                 vlan_num = vsi->vlan_num;
6061         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6062                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6063                 vlan_num = 1;
6064
6065         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6066         if (mv_f == NULL) {
6067                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6068                 return I40E_ERR_NO_MEMORY;
6069         }
6070
6071         for (i = 0; i < vlan_num; i++) {
6072                 mv_f[i].filter_type = mac_filter->filter_type;
6073                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6074                                 ETH_ADDR_LEN);
6075         }
6076
6077         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6078                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6079                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6080                                         &mac_filter->mac_addr);
6081                 if (ret != I40E_SUCCESS)
6082                         goto DONE;
6083         }
6084
6085         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6086         if (ret != I40E_SUCCESS)
6087                 goto DONE;
6088
6089         /* Add the mac addr to the mac list */
6090         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6091         if (f == NULL) {
6092                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6093                 ret = I40E_ERR_NO_MEMORY;
6094                 goto DONE;
6095         }
6096         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6097                         ETH_ADDR_LEN);
6098         f->mac_info.filter_type = mac_filter->filter_type;
6099         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6100         vsi->mac_num++;
6101
6102         ret = I40E_SUCCESS;
6103 DONE:
6104         rte_free(mv_f);
6105
6106         return ret;
6107 }
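
/*
 * Illustrative sketch, not upstream code: adding a perfect-match MAC
 * filter that ignores the vlan tag, as a caller of i40e_vsi_add_mac()
 * would.
 */
static inline int
i40e_example_add_mac(struct i40e_vsi *vsi, struct ether_addr *mac)
{
	struct i40e_mac_filter_info filter;

	(void)rte_memcpy(&filter.mac_addr, mac, ETH_ADDR_LEN);
	filter.filter_type = RTE_MAC_PERFECT_MATCH;

	return i40e_vsi_add_mac(vsi, &filter);
}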
6108
6109 int
6110 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6111 {
6112         struct i40e_mac_filter *f;
6113         struct i40e_macvlan_filter *mv_f;
6114         int i, vlan_num;
6115         enum rte_mac_filter_type filter_type;
6116         int ret = I40E_SUCCESS;
6117
6118         /* Can't find it, return an error */
6119         f = i40e_find_mac_filter(vsi, addr);
6120         if (f == NULL)
6121                 return I40E_ERR_PARAM;
6122
6123         vlan_num = vsi->vlan_num;
6124         filter_type = f->mac_info.filter_type;
6125         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6126                 filter_type == RTE_MACVLAN_HASH_MATCH) {
6127                 if (vlan_num == 0) {
6128                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
6129                         return I40E_ERR_PARAM;
6130                 }
6131         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6132                         filter_type == RTE_MAC_HASH_MATCH)
6133                 vlan_num = 1;
6134
6135         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6136         if (mv_f == NULL) {
6137                 PMD_DRV_LOG(ERR, "failed to allocate memory");
6138                 return I40E_ERR_NO_MEMORY;
6139         }
6140
6141         for (i = 0; i < vlan_num; i++) {
6142                 mv_f[i].filter_type = filter_type;
6143                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6144                                 ETH_ADDR_LEN);
6145         }
6146         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6147                         filter_type == RTE_MACVLAN_HASH_MATCH) {
6148                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6149                 if (ret != I40E_SUCCESS)
6150                         goto DONE;
6151         }
6152
6153         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6154         if (ret != I40E_SUCCESS)
6155                 goto DONE;
6156
6157         /* Remove the mac addr from the mac list */
6158         TAILQ_REMOVE(&vsi->mac_list, f, next);
6159         rte_free(f);
6160         vsi->mac_num--;
6161
6162         ret = I40E_SUCCESS;
6163 DONE:
6164         rte_free(mv_f);
6165         return ret;
6166 }
6167
6168 /* Configure hash enable flags for RSS */
6169 uint64_t
6170 i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
6171 {
6172         uint64_t hena = 0;
6173
6174         if (!flags)
6175                 return hena;
6176
6177         if (flags & ETH_RSS_FRAG_IPV4)
6178                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
6179         if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
6180                 if (type == I40E_MAC_X722) {
6181                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
6182                          (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
6183                 } else
6184                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
6185         }
6186         if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
6187                 if (type == I40E_MAC_X722) {
6188                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
6189                          (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
6190                          (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
6191                 } else
6192                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
6193         }
6194         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
6195                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
6196         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
6197                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
6198         if (flags & ETH_RSS_FRAG_IPV6)
6199                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
6200         if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
6201                 if (type == I40E_MAC_X722) {
6202                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
6203                          (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
6204                 } else
6205                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
6206         }
6207         if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
6208                 if (type == I40E_MAC_X722) {
6209                         hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
6210                          (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
6211                          (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
6212                 } else
6213                         hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
6214         }
6215         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
6216                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
6217         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
6218                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
6219         if (flags & ETH_RSS_L2_PAYLOAD)
6220                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
6221
6222         return hena;
6223 }
6224
6225 /* Parse the hash enable flags */
6226 uint64_t
6227 i40e_parse_hena(uint64_t flags)
6228 {
6229         uint64_t rss_hf = 0;
6230
6231         if (!flags)
6232                 return rss_hf;
6233         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
6234                 rss_hf |= ETH_RSS_FRAG_IPV4;
6235         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
6236                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6237 #ifdef X722_SUPPORT
6238         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
6239                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6240 #endif
6241         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
6242                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6243 #ifdef X722_SUPPORT
6244         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
6245                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6246         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
6247                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6248 #endif
6249         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
6250                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
6251         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
6252                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
6253         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
6254                 rss_hf |= ETH_RSS_FRAG_IPV6;
6255         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
6256                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6257 #ifdef X722_SUPPORT
6258         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
6259                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6260 #endif
6261         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
6262                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6263 #ifdef X722_SUPPORT
6264         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
6265                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6266         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
6267                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6268 #endif
6269         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
6270                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
6271         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
6272                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
6273         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
6274                 rss_hf |= ETH_RSS_L2_PAYLOAD;
6275
6276         return rss_hf;
6277 }
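
/*
 * Illustrative sketch, not upstream code: i40e_config_hena() and
 * i40e_parse_hena() translate between the ethdev RSS flag space and the
 * hardware PCTYPE bitmap. For non-X722 MACs the mapping round-trips:
 */
static inline bool
i40e_example_hena_roundtrip(void)
{
	uint64_t flags = ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP;
	uint64_t hena = i40e_config_hena(flags, I40E_MAC_XL710);

	/* Each PCTYPE bit maps back to the RSS flag that set it */
	return i40e_parse_hena(hena) == flags;
}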
6278
6279 /* Disable RSS */
6280 static void
6281 i40e_pf_disable_rss(struct i40e_pf *pf)
6282 {
6283         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6284         uint64_t hena;
6285
6286         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6287         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6288         if (hw->mac.type == I40E_MAC_X722)
6289                 hena &= ~I40E_RSS_HENA_ALL_X722;
6290         else
6291                 hena &= ~I40E_RSS_HENA_ALL;
6292         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6293         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6294         I40E_WRITE_FLUSH(hw);
6295 }
6296
6297 static int
6298 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6299 {
6300         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6301         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6302         int ret = 0;
6303
6304         if (!key || key_len == 0) {
6305                 PMD_DRV_LOG(DEBUG, "No key to be configured");
6306                 return 0;
6307         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6308                 sizeof(uint32_t)) {
6309                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6310                 return -EINVAL;
6311         }
6312
6313         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6314                 struct i40e_aqc_get_set_rss_key_data *key_dw =
6315                         (struct i40e_aqc_get_set_rss_key_data *)key;
6316
6317                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6318                 if (ret)
6319                         PMD_INIT_LOG(ERR, "Failed to configure RSS key "
6320                                      "via AQ");
6321         } else {
6322                 uint32_t *hash_key = (uint32_t *)key;
6323                 uint16_t i;
6324
6325                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6326                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
6327                 I40E_WRITE_FLUSH(hw);
6328         }
6329
6330         return ret;
6331 }
6332
6333 static int
6334 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6335 {
6336         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6337         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6338         int ret;
6339
6340         if (!key || !key_len)
6341                 return -EINVAL;
6342
6343         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6344                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6345                         (struct i40e_aqc_get_set_rss_key_data *)key);
6346                 if (ret) {
6347                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6348                         return ret;
6349                 }
6350         } else {
6351                 uint32_t *key_dw = (uint32_t *)key;
6352                 uint16_t i;
6353
6354                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6355                         key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6356         }
6357         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
6358
6359         return 0;
6360 }
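
/*
 * Worked example, illustrative only: the PF RSS key spans registers
 * HKEY(0)..HKEY(I40E_PFQF_HKEY_MAX_INDEX), i.e.
 * (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) = 13 * 4 = 52 bytes,
 * which is the only key_len that i40e_set_rss_key() accepts and the
 * length that i40e_get_rss_key() always reports.
 */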
6361
6362 static int
6363 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6364 {
6365         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6366         uint64_t rss_hf;
6367         uint64_t hena;
6368         int ret;
6369
6370         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6371                                rss_conf->rss_key_len);
6372         if (ret)
6373                 return ret;
6374
6375         rss_hf = rss_conf->rss_hf;
6376         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6377         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6378         if (hw->mac.type == I40E_MAC_X722)
6379                 hena &= ~I40E_RSS_HENA_ALL_X722;
6380         else
6381                 hena &= ~I40E_RSS_HENA_ALL;
6382         hena |= i40e_config_hena(rss_hf, hw->mac.type);
6383         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6384         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6385         I40E_WRITE_FLUSH(hw);
6386
6387         return 0;
6388 }
6389
6390 static int
6391 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6392                          struct rte_eth_rss_conf *rss_conf)
6393 {
6394         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6395         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6396         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
6397         uint64_t hena;
6398
6399         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6400         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6401         if (!(hena & ((hw->mac.type == I40E_MAC_X722)
6402                  ? I40E_RSS_HENA_ALL_X722
6403                  : I40E_RSS_HENA_ALL))) { /* RSS disabled */
6404                 if (rss_hf != 0) /* Enable RSS */
6405                         return -EINVAL;
6406                 return 0; /* Nothing to do */
6407         }
6408         /* RSS enabled */
6409         if (rss_hf == 0) /* Disable RSS */
6410                 return -EINVAL;
6411
6412         return i40e_hw_rss_hash_set(pf, rss_conf);
6413 }
6414
6415 static int
6416 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6417                            struct rte_eth_rss_conf *rss_conf)
6418 {
6419         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6420         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6421         uint64_t hena;
6422
6423         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6424                          &rss_conf->rss_key_len);
6425
6426         hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6427         hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6428         rss_conf->rss_hf = i40e_parse_hena(hena);
6429
6430         return 0;
6431 }
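
/*
 * Illustrative sketch, not upstream code: querying the active RSS
 * configuration through the generic ethdev API, which lands in
 * i40e_dev_rss_hash_conf_get() above. The key buffer must hold the full
 * 52-byte i40e key.
 */
static inline uint64_t
i40e_example_query_rss_hf(uint8_t port_id)
{
	uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
	struct rte_eth_rss_conf conf = { .rss_key = key };

	if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) != 0)
		return 0;

	return conf.rss_hf;	/* enabled protocols, see i40e_parse_hena() */
}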
6432
6433 static int
6434 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6435 {
6436         switch (filter_type) {
6437         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6438                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6439                 break;
6440         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6441                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6442                 break;
6443         case RTE_TUNNEL_FILTER_IMAC_TENID:
6444                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6445                 break;
6446         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6447                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6448                 break;
6449         case ETH_TUNNEL_FILTER_IMAC:
6450                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6451                 break;
6452         case ETH_TUNNEL_FILTER_OIP:
6453                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
6454                 break;
6455         case ETH_TUNNEL_FILTER_IIP:
6456                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
6457                 break;
6458         default:
6459                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
6460                 return -EINVAL;
6461         }
6462
6463         return 0;
6464 }
6465
6466 static int
6467 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
6468                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
6469                         uint8_t add)
6470 {
6471         uint16_t ip_type;
6472         uint32_t ipv4_addr;
6473         uint8_t i, tun_type = 0;
6474         /* internal variable to convert ipv6 byte order */
6475         uint32_t convert_ipv6[4];
6476         int val, ret = 0;
6477         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6478         struct i40e_vsi *vsi = pf->main_vsi;
6479         struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
6480         struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;
6481
6482         cld_filter = rte_zmalloc("tunnel_filter",
6483                 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
6484                 0);
6485
6486         if (NULL == cld_filter) {
6487                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
6488                 return -EINVAL;
6489         }
6490         pfilter = cld_filter;
6491
6492         ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac);
6493         ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac);
6494
6495         pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
6496         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
6497                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
6498                 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
6499                 rte_memcpy(&pfilter->ipaddr.v4.data,
6500                                 &rte_cpu_to_le_32(ipv4_addr),
6501                                 sizeof(pfilter->ipaddr.v4.data));
6502         } else {
6503                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
6504                 for (i = 0; i < 4; i++) {
6505                         convert_ipv6[i] =
6506                         rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
6507                 }
6508                 rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
6509                                 sizeof(pfilter->ipaddr.v6.data));
6510         }
6511
6512         /* check tunneled type */
6513         switch (tunnel_filter->tunnel_type) {
6514         case RTE_TUNNEL_TYPE_VXLAN:
6515                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
6516                 break;
6517         case RTE_TUNNEL_TYPE_NVGRE:
6518                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
6519                 break;
6520         case RTE_TUNNEL_TYPE_IP_IN_GRE:
6521                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
6522                 break;
6523         default:
6524                 /* Other tunnel types are not supported. */
6525                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
6526                 rte_free(cld_filter);
6527                 return -EINVAL;
6528         }
6529
6530         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
6531                                                 &pfilter->flags);
6532         if (val < 0) {
6533                 rte_free(cld_filter);
6534                 return -EINVAL;
6535         }
6536
6537         pfilter->flags |= rte_cpu_to_le_16(
6538                 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
6539                 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
6540         pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
6541         pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
6542
6543         if (add)
6544                 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
6545         else
6546                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
6547                                                 cld_filter, 1);
6548
6549         rte_free(cld_filter);
6550         return ret;
6551 }
6552
6553 static int
6554 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
6555 {
6556         uint8_t i;
6557
6558         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6559                 if (pf->vxlan_ports[i] == port)
6560                         return i;
6561         }
6562
6563         return -1;
6564 }
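
/*
 * Note: a vxlan_ports[] entry of 0 marks an unused slot, which is why
 * i40e_add_vxlan_port() below probes for free space with
 * i40e_get_vxlan_port_idx(pf, 0).
 */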
6565
6566 static int
6567 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
6568 {
6569         int  idx, ret;
6570         uint8_t filter_idx;
6571         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6572
6573         idx = i40e_get_vxlan_port_idx(pf, port);
6574
6575         /* Check if port already exists */
6576         if (idx >= 0) {
6577                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
6578                 return -EINVAL;
6579         }
6580
6581         /* Now check if there is space to add the new port */
6582         idx = i40e_get_vxlan_port_idx(pf, 0);
6583         if (idx < 0) {
6584                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
6585                         "not adding port %d", port);
6586                 return -ENOSPC;
6587         }
6588
6589         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
6590                                         &filter_idx, NULL);
6591         if (ret < 0) {
6592                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
6593                 return -1;
6594         }
6595
6596         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
6597                         port, filter_idx);
6598
6599         /* New port: add it and mark its index in the bitmap */
6600         pf->vxlan_ports[idx] = port;
6601         pf->vxlan_bitmap |= (1 << idx);
6602
6603         if (!(pf->flags & I40E_FLAG_VXLAN))
6604                 pf->flags |= I40E_FLAG_VXLAN;
6605
6606         return 0;
6607 }
6608
6609 static int
6610 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
6611 {
6612         int idx;
6613         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6614
6615         if (!(pf->flags & I40E_FLAG_VXLAN)) {
6616                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
6617                 return -EINVAL;
6618         }
6619
6620         idx = i40e_get_vxlan_port_idx(pf, port);
6621
6622         if (idx < 0) {
6623                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
6624                 return -EINVAL;
6625         }
6626
6627         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
6628                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
6629                 return -1;
6630         }
6631
6632         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
6633                         port, idx);
6634
6635         pf->vxlan_ports[idx] = 0;
6636         pf->vxlan_bitmap &= ~(1 << idx);
6637
6638         if (!pf->vxlan_bitmap)
6639                 pf->flags &= ~I40E_FLAG_VXLAN;
6640
6641         return 0;
6642 }
6643
6644 /* Add UDP tunneling port */
6645 static int
6646 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
6647                              struct rte_eth_udp_tunnel *udp_tunnel)
6648 {
6649         int ret = 0;
6650         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6651
6652         if (udp_tunnel == NULL)
6653                 return -EINVAL;
6654
6655         switch (udp_tunnel->prot_type) {
6656         case RTE_TUNNEL_TYPE_VXLAN:
6657                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
6658                 break;
6659
6660         case RTE_TUNNEL_TYPE_GENEVE:
6661         case RTE_TUNNEL_TYPE_TEREDO:
6662                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
6663                 ret = -1;
6664                 break;
6665
6666         default:
6667                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6668                 ret = -1;
6669                 break;
6670         }
6671
6672         return ret;
6673 }
6674
6675 /* Remove UDP tunneling port */
6676 static int
6677 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
6678                              struct rte_eth_udp_tunnel *udp_tunnel)
6679 {
6680         int ret = 0;
6681         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6682
6683         if (udp_tunnel == NULL)
6684                 return -EINVAL;
6685
6686         switch (udp_tunnel->prot_type) {
6687         case RTE_TUNNEL_TYPE_VXLAN:
6688                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
6689                 break;
6690         case RTE_TUNNEL_TYPE_GENEVE:
6691         case RTE_TUNNEL_TYPE_TEREDO:
6692                 PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
6693                 ret = -1;
6694                 break;
6695         default:
6696                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6697                 ret = -1;
6698                 break;
6699         }
6700
6701         return ret;
6702 }
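
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches the two handlers above through the generic ethdev wrappers,
 * assumed here to be rte_eth_dev_udp_tunnel_port_add()/_del() of this
 * release. 4789 is the IANA-assigned VXLAN port and is just an example.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *	...
 *	ret = rte_eth_dev_udp_tunnel_port_del(port_id, &tunnel);
 */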
6703
6704 /* Calculate the maximum number of contiguous PF queues that are configured */
6705 static int
6706 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
6707 {
6708         struct rte_eth_dev_data *data = pf->dev_data;
6709         int i, num;
6710         struct i40e_rx_queue *rxq;
6711
6712         num = 0;
6713         for (i = 0; i < pf->lan_nb_qps; i++) {
6714                 rxq = data->rx_queues[i];
6715                 if (rxq && rxq->q_set)
6716                         num++;
6717                 else
6718                         break;
6719         }
6720
6721         return num;
6722 }
6723
6724 /* Configure RSS */
6725 static int
6726 i40e_pf_config_rss(struct i40e_pf *pf)
6727 {
6728         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6729         struct rte_eth_rss_conf rss_conf;
6730         uint32_t i, lut = 0;
6731         uint16_t j, num;
6732
6733         /*
6734          * If both VMDQ and RSS are enabled, not all PF queues are configured.
6735          * It's necessary to calculate the actual number of PF queues in use.
6736          */
6737         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
6738                 num = i40e_pf_calc_configured_queues_num(pf);
6739         else
6740                 num = pf->dev_data->nb_rx_queues;
6741
6742         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
6743         PMD_INIT_LOG(INFO, "Up to %u contiguous PF queues are configured",
6744                         num);
6745
6746         if (num == 0) {
6747                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
6748                 return -ENOTSUP;
6749         }
6750
6751         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
6752                 if (j == num)
6753                         j = 0;
6754                 lut = (lut << 8) | (j & ((0x1 <<
6755                         hw->func_caps.rss_table_entry_width) - 1));
6756                 if ((i & 3) == 3)
6757                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
6758         }
6759
6760         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
6761         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
6762                 i40e_pf_disable_rss(pf);
6763                 return 0;
6764         }
6765         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
6766                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
6767                 /* Random default keys */
6768                 static uint32_t rss_key_default[] = {0x6b793944,
6769                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
6770                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
6771                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
6772
6773                 rss_conf.rss_key = (uint8_t *)rss_key_default;
6774                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6775                                                         sizeof(uint32_t);
6776         }
6777
6778         return i40e_hw_rss_hash_set(pf, &rss_conf);
6779 }
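
/*
 * Worked example of the LUT packing above (illustrative): with num == 4,
 * each loop iteration shifts one queue index into the low byte of 'lut',
 * and every fourth iteration flushes the accumulated word, so
 * I40E_PFQF_HLUT(0) receives 0x00010203 (LUT entries for queues 0..3) and
 * the same pattern repeats across the rest of the RSS table.
 */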
6780
6781 static int
6782 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
6783                                struct rte_eth_tunnel_filter_conf *filter)
6784 {
6785         if (pf == NULL || filter == NULL) {
6786                 PMD_DRV_LOG(ERR, "Invalid parameter");
6787                 return -EINVAL;
6788         }
6789
6790         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
6791                 PMD_DRV_LOG(ERR, "Invalid queue ID");
6792                 return -EINVAL;
6793         }
6794
6795         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
6796                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
6797                 return -EINVAL;
6798         }
6799
6800         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
6801                 (is_zero_ether_addr(&filter->outer_mac))) {
6802                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
6803                 return -EINVAL;
6804         }
6805
6806         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
6807                 (is_zero_ether_addr(&filter->inner_mac))) {
6808                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
6809                 return -EINVAL;
6810         }
6811
6812         return 0;
6813 }
6814
6815 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
6816 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
6817 static int
6818 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
6819 {
6820         uint32_t val, reg;
6821         int ret = -EINVAL;
6822
6823         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
6824         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
6825
6826         if (len == 3) {
6827                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
6828         } else if (len == 4) {
6829                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
6830         } else {
6831                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
6832                 return ret;
6833         }
6834
6835         if (reg != val) {
6836                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
6837                                                    reg, NULL);
6838                 if (ret != 0)
6839                         return ret;
6840         } else {
6841                 ret = 0;
6842         }
6843         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
6844                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
6845
6846         return ret;
6847 }
6848
6849 static int
6850 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
6851 {
6852         int ret = -EINVAL;
6853
6854         if (!hw || !cfg)
6855                 return -EINVAL;
6856
6857         switch (cfg->cfg_type) {
6858         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
6859                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
6860                 break;
6861         default:
6862                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
6863                 break;
6864         }
6865
6866         return ret;
6867 }
6868
6869 static int
6870 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
6871                                enum rte_filter_op filter_op,
6872                                void *arg)
6873 {
6874         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6875         int ret = I40E_ERR_PARAM;
6876
6877         switch (filter_op) {
6878         case RTE_ETH_FILTER_SET:
6879                 ret = i40e_dev_global_config_set(hw,
6880                         (struct rte_eth_global_cfg *)arg);
6881                 break;
6882         default:
6883                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6884                 break;
6885         }
6886
6887         return ret;
6888 }
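
/*
 * Usage sketch (illustrative only): selecting a 3-byte GRE key NIC-wide
 * through the global config path above, assuming the handler is reached
 * via rte_eth_dev_filter_ctrl() with the RTE_ETH_FILTER_NONE filter type.
 *
 *	struct rte_eth_global_cfg cfg = {
 *		.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
 *		.cfg.gre_key_len = 3,
 *	};
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
 *				      RTE_ETH_FILTER_SET, &cfg);
 */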
6889
6890 static int
6891 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
6892                           enum rte_filter_op filter_op,
6893                           void *arg)
6894 {
6895         struct rte_eth_tunnel_filter_conf *filter;
6896         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6897         int ret = I40E_SUCCESS;
6898
6899         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
6900
6901         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
6902                 return I40E_ERR_PARAM;
6903
6904         switch (filter_op) {
6905         case RTE_ETH_FILTER_NOP:
6906                 if (!(pf->flags & I40E_FLAG_VXLAN))
6907                         ret = I40E_NOT_SUPPORTED;
6908                 break;
6909         case RTE_ETH_FILTER_ADD:
6910                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
6911                 break;
6912         case RTE_ETH_FILTER_DELETE:
6913                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
6914                 break;
6915         default:
6916                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6917                 ret = I40E_ERR_PARAM;
6918                 break;
6919         }
6920
6921         return ret;
6922 }
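
/*
 * Usage sketch (illustrative only; all field values are example
 * assumptions): adding a VXLAN tunnel filter that matches inner MAC plus
 * tenant ID and steers hits to queue 3, assuming the handler above is
 * reached via the RTE_ETH_FILTER_TUNNEL filter type. conf.inner_mac and
 * conf.ip_addr would be filled in by the application beforehand.
 *
 *	struct rte_eth_tunnel_filter_conf conf = {
 *		.tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *		.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID,
 *		.ip_type = RTE_TUNNEL_IPTYPE_IPV4,
 *		.tenant_id = 100,
 *		.queue_id = 3,
 *	};
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *				      RTE_ETH_FILTER_ADD, &conf);
 */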
6923
6924 static int
6925 i40e_pf_config_mq_rx(struct i40e_pf *pf)
6926 {
6927         int ret = 0;
6928         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
6929
6930         /* RSS setup */
6931         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
6932                 ret = i40e_pf_config_rss(pf);
6933         else
6934                 i40e_pf_disable_rss(pf);
6935
6936         return ret;
6937 }
6938
6939 /* Get the symmetric hash enable configurations per port */
6940 static void
6941 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
6942 {
6943         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
6944
6945         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
6946 }
6947
6948 /* Set the symmetric hash enable configurations per port */
6949 static void
6950 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
6951 {
6952         uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
6953
6954         if (enable > 0) {
6955                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
6956                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6957                                                         "been enabled");
6958                         return;
6959                 }
6960                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6961         } else {
6962                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
6963                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6964                                                         "been disabled");
6965                         return;
6966                 }
6967                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6968         }
6969         i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
6970         I40E_WRITE_FLUSH(hw);
6971 }
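
/*
 * Usage sketch (illustrative only): the helper pair above is assumed to be
 * driven by the RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT info type of the
 * hash filter control path, e.g. to enable symmetric hashing on one port:
 *
 *	struct rte_eth_hash_filter_info info = {
 *		.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
 *		.info.enable = 1,
 *	};
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *				      RTE_ETH_FILTER_SET, &info);
 */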
6972
6973 /*
6974  * Get global configurations of hash function type and symmetric hash enable
6975  * per flow type (pctype). Note that global configuration means it affects all
6976  * the ports on the same NIC.
6977  */
6978 static int
6979 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
6980                                    struct rte_eth_hash_global_conf *g_cfg)
6981 {
6982         uint32_t reg, mask = I40E_FLOW_TYPES;
6983         uint16_t i;
6984         enum i40e_filter_pctype pctype;
6985
6986         memset(g_cfg, 0, sizeof(*g_cfg));
6987         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
6988         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
6989                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
6990         else
6991                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
6992         PMD_DRV_LOG(DEBUG, "Hash function is %s",
6993                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
6994
6995         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
6996                 if (!(mask & (1UL << i)))
6997                         continue;
6998                 mask &= ~(1UL << i);
6999                 /* A set bit indicates the corresponding flow type is supported */
7000                 g_cfg->valid_bit_mask[0] |= (1UL << i);
7001                 /* if flowtype is invalid, continue */
7002                 if (!I40E_VALID_FLOW(i))
7003                         continue;
7004                 pctype = i40e_flowtype_to_pctype(i);
7005                 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
7006                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
7007                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
7008         }
7009
7010         return 0;
7011 }
7012
7013 static int
7014 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
7015 {
7016         uint32_t i;
7017         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
7018
7019         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
7020                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
7021                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
7022                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
7023                                                 g_cfg->hash_func);
7024                 return -EINVAL;
7025         }
7026
7027         /*
7028          * As i40e supports fewer than 32 flow types, only the first 32 bits
7029          * need to be checked.
7030          */
7031         mask0 = g_cfg->valid_bit_mask[0];
7032         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
7033                 if (i == 0) {
7034                         /* Non-zero iff mask0 has a bit outside i40e_mask,
7035                          * i.e. an unsupported flow type is configured.
7036                          */
7035                         if ((mask0 | i40e_mask) ^ i40e_mask)
7036                                 goto mask_err;
7037                 } else {
7038                         if (g_cfg->valid_bit_mask[i])
7039                                 goto mask_err;
7040                 }
7041         }
7042
7043         return 0;
7044
7045 mask_err:
7046         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
7047
7048         return -EINVAL;
7049 }
7050
7051 /*
7052  * Set global configurations of hash function type and symmetric hash enable
7053  * per flow type (pctype). Note any modifying global configuration will affect
7054  * all the ports on the same NIC.
7055  */
7056 static int
7057 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
7058                                    struct rte_eth_hash_global_conf *g_cfg)
7059 {
7060         int ret;
7061         uint16_t i;
7062         uint32_t reg;
7063         uint32_t mask0 = g_cfg->valid_bit_mask[0];
7064         enum i40e_filter_pctype pctype;
7065
7066         /* Check the input parameters */
7067         ret = i40e_hash_global_config_check(g_cfg);
7068         if (ret < 0)
7069                 return ret;
7070
7071         for (i = 0; mask0 && i < UINT32_BIT; i++) {
7072                 if (!(mask0 & (1UL << i)))
7073                         continue;
7074                 mask0 &= ~(1UL << i);
7075                 /* if flowtype is invalid, continue */
7076                 if (!I40E_VALID_FLOW(i))
7077                         continue;
7078                 pctype = i40e_flowtype_to_pctype(i);
7079                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
7080                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
7081                 i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
7082         }
7083
7084         reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7085         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
7086                 /* Toeplitz */
7087                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
7088                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
7089                                                                 "Toeplitz");
7090                         goto out;
7091                 }
7092                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
7093         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
7094                 /* Simple XOR */
7095                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
7096                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
7097                                                         "Simple XOR");
7098                         goto out;
7099                 }
7100                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
7101         } else
7102                 /* Use the default, and keep it as it is */
7103                 goto out;
7104
7105         i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
7106
7107 out:
7108         I40E_WRITE_FLUSH(hw);
7109
7110         return 0;
7111 }
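
/*
 * Usage sketch (illustrative only): requesting symmetric Toeplitz hashing
 * for non-fragmented IPv4/UDP flows. Note again that this is a global
 * setting shared by every port on the NIC.
 *
 *	struct rte_eth_hash_filter_info info = {
 *		.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG,
 *	};
 *	info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
 *	info.info.global_conf.valid_bit_mask[0] =
 *			1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	info.info.global_conf.sym_hash_enable_mask[0] =
 *			1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *				      RTE_ETH_FILTER_SET, &info);
 */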
7112
7113 /**
7114  * Valid input sets for hash and flow director filters per PCTYPE
7115  */
7116 static uint64_t
7117 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
7118                 enum rte_filter_type filter)
7119 {
7120         uint64_t valid;
7121
7122         static const uint64_t valid_hash_inset_table[] = {
7123                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
7124                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7125                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7126                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
7127                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
7128                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7129                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7130                         I40E_INSET_FLEX_PAYLOAD,
7131                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7132                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7133                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7134                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7135                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7136                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7137                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7138                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7139                         I40E_INSET_FLEX_PAYLOAD,
7140 #ifdef X722_SUPPORT
7141                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7142                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7143                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7144                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7145                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7146                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7147                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7148                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7149                         I40E_INSET_FLEX_PAYLOAD,
7150                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7151                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7152                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7153                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7154                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7155                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7156                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7157                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7158                         I40E_INSET_FLEX_PAYLOAD,
7159 #endif
7160                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7161                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7162                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7163                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7164                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7165                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7166                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7167                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7168                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7169 #ifdef X722_SUPPORT
7170                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7171                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7172                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7173                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7174                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7175                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7176                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7177                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7178                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7179 #endif
7180                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7181                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7182                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7183                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7184                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7185                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7186                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7187                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7188                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
7189                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7190                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7191                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7192                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7193                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7194                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7195                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7196                         I40E_INSET_FLEX_PAYLOAD,
7197                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
7198                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7199                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7200                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7201                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7202                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
7203                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
7204                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
7205                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7206                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7207                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7208                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7209                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7210                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7211                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7212                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
7213 #ifdef X722_SUPPORT
7214                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7215                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7216                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7217                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7218                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7219                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7220                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7221                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7222                         I40E_INSET_FLEX_PAYLOAD,
7223                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7224                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7225                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7226                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7227                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7228                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7229                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7230                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7231                         I40E_INSET_FLEX_PAYLOAD,
7232 #endif
7233                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7234                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7235                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7236                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7237                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7238                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7239                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7240                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7241                         I40E_INSET_FLEX_PAYLOAD,
7242 #ifdef X722_SUPPORT
7243                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7244                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7245                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7246                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7247                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7248                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7249                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7250                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7251                         I40E_INSET_FLEX_PAYLOAD,
7252 #endif
7253                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7254                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7255                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7256                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7257                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7258                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7259                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7260                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
7261                         I40E_INSET_FLEX_PAYLOAD,
7262                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7263                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7264                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7265                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7266                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7267                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7268                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
7269                         I40E_INSET_FLEX_PAYLOAD,
7270                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7271                         I40E_INSET_DMAC | I40E_INSET_SMAC |
7272                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7273                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
7274                         I40E_INSET_FLEX_PAYLOAD,
7275         };
7276
7277         /**
7278          * Flow director supports only fields defined in
7279          * union rte_eth_fdir_flow.
7280          */
7281         static const uint64_t valid_fdir_inset_table[] = {
7282                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
7283                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7284                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7285                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
7286                 I40E_INSET_IPV4_TTL,
7287                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7288                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7289                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7290                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7291                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7292 #ifdef X722_SUPPORT
7293                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7294                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7295                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7296                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7297                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7298                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7299                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7300                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7301                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7302                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7303 #endif
7304                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7305                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7306                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7307                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7308                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7309 #ifdef X722_SUPPORT
7310                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7311                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7312                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7313                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7314                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7315 #endif
7316                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7317                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7318                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7319                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7320                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7321                 I40E_INSET_SCTP_VT,
7322                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7323                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7324                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7325                 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
7326                 I40E_INSET_IPV4_TTL,
7327                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
7328                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7329                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7330                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
7331                 I40E_INSET_IPV6_HOP_LIMIT,
7332                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7333                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7334                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7335                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7336                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7337 #ifdef X722_SUPPORT
7338                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7339                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7340                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7341                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7342                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7343                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7344                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7345                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7346                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7347                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7348 #endif
7349                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7350                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7351                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7352                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7353                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7354 #ifdef X722_SUPPORT
7355                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7356                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7357                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7358                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7359                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7360 #endif
7361                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7362                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7363                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7364                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7365                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7366                 I40E_INSET_SCTP_VT,
7367                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7368                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7369                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7370                 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
7371                 I40E_INSET_IPV6_HOP_LIMIT,
7372                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7373                 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7374                 I40E_INSET_LAST_ETHER_TYPE,
7375         };
7376
7377         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
7378                 return 0;
7379         if (filter == RTE_ETH_FILTER_HASH)
7380                 valid = valid_hash_inset_table[pctype];
7381         else
7382                 valid = valid_fdir_inset_table[pctype];
7383
7384         return valid;
7385 }
7386
7387 /**
7388  * Validate if the input set is allowed for a specific PCTYPE
7389  */
7390 static int
7391 i40e_validate_input_set(enum i40e_filter_pctype pctype,
7392                 enum rte_filter_type filter, uint64_t inset)
7393 {
7394         uint64_t valid;
7395
7396         valid = i40e_get_valid_input_set(pctype, filter);
7397         if (inset & (~valid))
7398                 return -EINVAL;
7399
7400         return 0;
7401 }
7402
7403 /* Default input set field combination per pctype */
7404 static uint64_t
7405 i40e_get_default_input_set(uint16_t pctype)
7406 {
7407         static const uint64_t default_inset_table[] = {
7408                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
7409                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
7410                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7411                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7412                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7413 #ifdef X722_SUPPORT
7414                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7415                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7416                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7417                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7418                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7419                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7420 #endif
7421                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7422                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7423                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7424 #ifdef X722_SUPPORT
7425                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7426                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7427                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7428 #endif
7429                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7430                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7431                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7432                         I40E_INSET_SCTP_VT,
7433                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7434                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
7435                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
7436                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
7437                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7438                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7439                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7440 #ifdef X722_SUPPORT
7441                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7442                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7443                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7444                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7445                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7446                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7447 #endif
7448                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7449                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7450                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7451 #ifdef X722_SUPPORT
7452                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7453                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7454                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7455 #endif
7456                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7457                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7458                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7459                         I40E_INSET_SCTP_VT,
7460                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7461                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
7462                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7463                         I40E_INSET_LAST_ETHER_TYPE,
7464         };
7465
7466         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
7467                 return 0;
7468
7469         return default_inset_table[pctype];
7470 }
7471
7472 /**
7473  * Parse the input set from field indexes to logical bit masks
7474  */
7475 static int
7476 i40e_parse_input_set(uint64_t *inset,
7477                      enum i40e_filter_pctype pctype,
7478                      enum rte_eth_input_set_field *field,
7479                      uint16_t size)
7480 {
7481         uint16_t i, j;
7482         int ret = -EINVAL;
7483
7484         static const struct {
7485                 enum rte_eth_input_set_field field;
7486                 uint64_t inset;
7487         } inset_convert_table[] = {
7488                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
7489                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
7490                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
7491                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
7492                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
7493                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
7494                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
7495                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
7496                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
7497                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
7498                 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
7499                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
7500                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
7501                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
7502                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
7503                         I40E_INSET_IPV6_NEXT_HDR},
7504                 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
7505                         I40E_INSET_IPV6_HOP_LIMIT},
7506                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
7507                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
7508                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
7509                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
7510                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
7511                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
7512                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
7513                         I40E_INSET_SCTP_VT},
7514                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
7515                         I40E_INSET_TUNNEL_DMAC},
7516                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
7517                         I40E_INSET_VLAN_TUNNEL},
7518                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
7519                         I40E_INSET_TUNNEL_ID},
7520                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
7521                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
7522                         I40E_INSET_FLEX_PAYLOAD_W1},
7523                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
7524                         I40E_INSET_FLEX_PAYLOAD_W2},
7525                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
7526                         I40E_INSET_FLEX_PAYLOAD_W3},
7527                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
7528                         I40E_INSET_FLEX_PAYLOAD_W4},
7529                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
7530                         I40E_INSET_FLEX_PAYLOAD_W5},
7531                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
7532                         I40E_INSET_FLEX_PAYLOAD_W6},
7533                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
7534                         I40E_INSET_FLEX_PAYLOAD_W7},
7535                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
7536                         I40E_INSET_FLEX_PAYLOAD_W8},
7537         };
7538
7539         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
7540                 return ret;
7541
7542         /* Only one item is allowed for 'default' or 'none' */
7543         if (size == 1) {
7544                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
7545                         *inset = i40e_get_default_input_set(pctype);
7546                         return 0;
7547                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
7548                         *inset = I40E_INSET_NONE;
7549                         return 0;
7550                 }
7551         }
7552
7553         for (i = 0, *inset = 0; i < size; i++) {
7554                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
7555                         if (field[i] == inset_convert_table[j].field) {
7556                                 *inset |= inset_convert_table[j].inset;
7557                                 break;
7558                         }
7559                 }
7560
7561                 /* Contains an unsupported input set field, return immediately */
7562                 if (j == RTE_DIM(inset_convert_table))
7563                         return ret;
7564         }
7565
7566         return 0;
7567 }
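
/*
 * Worked example (illustrative): a field array of
 * { RTE_ETH_INPUT_SET_L3_SRC_IP4, RTE_ETH_INPUT_SET_L4_UDP_DST_PORT } with
 * size == 2 walks the convert table twice and yields
 * *inset == (I40E_INSET_IPV4_SRC | I40E_INSET_DST_PORT), while a single
 * RTE_ETH_INPUT_SET_DEFAULT entry returns the per-pctype default from
 * i40e_get_default_input_set() instead.
 */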
7568
7569 /**
7570  * Translate the input set from logical bit masks to register aware
7571  * bit masks
7572  */
7573 static uint64_t
7574 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
7575 {
7576         uint64_t val = 0;
7577         uint16_t i;
7578
7579         struct inset_map {
7580                 uint64_t inset;
7581                 uint64_t inset_reg;
7582         };
7583
7584         static const struct inset_map inset_map_common[] = {
7585                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
7586                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
7587                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
7588                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
7589                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
7590                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
7591                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
7592                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
7593                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
7594                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
7595                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
7596                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
7597                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
7598                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
7599                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
7600                 {I40E_INSET_TUNNEL_DMAC,
7601                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
7602                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
7603                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
7604                 {I40E_INSET_TUNNEL_SRC_PORT,
7605                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
7606                 {I40E_INSET_TUNNEL_DST_PORT,
7607                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
7608                 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
7609                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
7610                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
7611                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
7612                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
7613                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
7614                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
7615                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
7616                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
7617         };
7618
7619         /* some registers are mapped differently on the X722 */
7620         static const struct inset_map inset_map_diff_x722[] = {
7621                 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
7622                 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
7623                 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
7624                 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
7625         };
7626
7627         static const struct inset_map inset_map_diff_not_x722[] = {
7628                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
7629                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
7630                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
7631                 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
7632         };
7633
7634         if (input == 0)
7635                 return val;
7636
7637         /* Translate input set to register aware inset */
7638         if (type == I40E_MAC_X722) {
7639                 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
7640                         if (input & inset_map_diff_x722[i].inset)
7641                                 val |= inset_map_diff_x722[i].inset_reg;
7642                 }
7643         } else {
7644                 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
7645                         if (input & inset_map_diff_not_x722[i].inset)
7646                                 val |= inset_map_diff_not_x722[i].inset_reg;
7647                 }
7648         }
7649
7650         for (i = 0; i < RTE_DIM(inset_map_common); i++) {
7651                 if (input & inset_map_common[i].inset)
7652                         val |= inset_map_common[i].inset_reg;
7653         }
7654
7655         return val;
7656 }
7657
7658 static int
7659 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
7660 {
7661         uint8_t i, idx = 0;
7662         uint64_t inset_need_mask = inset;
7663
7664         static const struct {
7665                 uint64_t inset;
7666                 uint32_t mask;
7667         } inset_mask_map[] = {
7668                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
7669                 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
7670                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
7671                 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
7672                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
7673                 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
7674                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
7675                 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
7676         };
7677
7678         if (!inset || !mask || !nb_elem)
7679                 return 0;
7680
7681         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
7682                 /* Clear inset bits that need no field mask because the
7683                  * fields are matched whole, e.g. proto + ttl together.
7684                  */
7685                 if ((inset & inset_mask_map[i].inset) ==
7686                      inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
7687                         inset_need_mask &= ~inset_mask_map[i].inset;
7688                 if (!inset_need_mask)
7689                         return 0;
7690         }
7691         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
7692                 if ((inset_need_mask & inset_mask_map[i].inset) ==
7693                     inset_mask_map[i].inset) {
7694                         if (idx >= nb_elem) {
7695                                 PMD_DRV_LOG(ERR, "Exceeded the max number of bitmask registers");
7696                                 return -EINVAL;
7697                         }
7698                         mask[idx] = inset_mask_map[i].mask;
7699                         idx++;
7700                 }
7701         }
7702
7703         return idx;
7704 }
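
/*
 * Worked example (illustrative): inset = TOS | PROTO | TTL. The first loop
 * drops PROTO and TTL from inset_need_mask because their combined map entry
 * carries a zero mask (both fields are matched whole), so the second loop
 * emits a single element, mask[0] = I40E_INSET_IPV4_TOS_MASK, and the
 * function returns 1.
 */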
7705
7706 static void
7707 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
7708 {
7709         uint32_t reg = i40e_read_rx_ctl(hw, addr);
7710
7711         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
7712         if (reg != val)
7713                 i40e_write_rx_ctl(hw, addr, val);
7714         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
7715                     (uint32_t)i40e_read_rx_ctl(hw, addr));
7716 }
7717
7718 static void
7719 i40e_filter_input_set_init(struct i40e_pf *pf)
7720 {
7721         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7722         enum i40e_filter_pctype pctype;
7723         uint64_t input_set, inset_reg;
7724         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
7725         int num, i;
7726
7727         for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
7728              pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
7729                 if (hw->mac.type == I40E_MAC_X722) {
7730                         if (!I40E_VALID_PCTYPE_X722(pctype))
7731                                 continue;
7732                 } else {
7733                         if (!I40E_VALID_PCTYPE(pctype))
7734                                 continue;
7735                 }
7736
7737                 input_set = i40e_get_default_input_set(pctype);
7738
7739                 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7740                                                    I40E_INSET_MASK_NUM_REG);
7741                 if (num < 0)
7742                         return;
7743                 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
7744                                         input_set);
7745
7746                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
7747                                       (uint32_t)(inset_reg & UINT32_MAX));
7748                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7749                                      (uint32_t)((inset_reg >>
7750                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
7751                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
7752                                       (uint32_t)(inset_reg & UINT32_MAX));
7753                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
7754                                      (uint32_t)((inset_reg >>
7755                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
7756
7757                 for (i = 0; i < num; i++) {
7758                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7759                                              mask_reg[i]);
7760                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7761                                              mask_reg[i]);
7762                 }
7763                 /* clear unused mask registers of the pctype */
7764                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
7765                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7766                                              0);
7767                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7768                                              0);
7769                 }
7770                 I40E_WRITE_FLUSH(hw);
7771
7772                 /* store the default input set */
7773                 pf->hash_input_set[pctype] = input_set;
7774                 pf->fdir.input_set[pctype] = input_set;
7775         }
7776 }
7777
7778 int
7779 i40e_hash_filter_inset_select(struct i40e_hw *hw,
7780                          struct rte_eth_input_set_conf *conf)
7781 {
7782         struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
7783         enum i40e_filter_pctype pctype;
7784         uint64_t input_set, inset_reg = 0;
7785         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
7786         int ret, i, num;
7787
7788         if (!conf) {
7789                 PMD_DRV_LOG(ERR, "Invalid pointer");
7790                 return -EFAULT;
7791         }
7792         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
7793             conf->op != RTE_ETH_INPUT_SET_ADD) {
7794                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
7795                 return -EINVAL;
7796         }
7797
7798         if (!I40E_VALID_FLOW(conf->flow_type)) {
7799                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
7800                 return -EINVAL;
7801         }
7802
7803         if (hw->mac.type == I40E_MAC_X722) {
7804                 /* get translated pctype value in fd pctype register */
7805                 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
7806                         I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
7807                         conf->flow_type)));
7808         } else
7809                 pctype = i40e_flowtype_to_pctype(conf->flow_type);
7810
7811         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
7812                                    conf->inset_size);
7813         if (ret) {
7814                 PMD_DRV_LOG(ERR, "Failed to parse input set");
7815                 return -EINVAL;
7816         }
7817         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH,
7818                                     input_set) != 0) {
7819                 PMD_DRV_LOG(ERR, "Invalid input set");
7820                 return -EINVAL;
7821         }
7822         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
7823                 /* get inset value in register */
7824                 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
7825                 inset_reg <<= I40E_32_BIT_WIDTH;
7826                 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
7827                 input_set |= pf->hash_input_set[pctype];
7828         }
7829         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7830                                            I40E_INSET_MASK_NUM_REG);
7831         if (num < 0)
7832                 return -EINVAL;
7833
7834         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
7835
7836         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
7837                               (uint32_t)(inset_reg & UINT32_MAX));
7838         i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
7839                              (uint32_t)((inset_reg >>
7840                              I40E_32_BIT_WIDTH) & UINT32_MAX));
7841
7842         for (i = 0; i < num; i++)
7843                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7844                                      mask_reg[i]);
7845         /* clear unused mask registers of the pctype */
7846         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
7847                 i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7848                                      0);
7849         I40E_WRITE_FLUSH(hw);
7850
7851         pf->hash_input_set[pctype] = input_set;
7852         return 0;
7853 }
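
/*
 * Illustrative sketch (editor's addition, not called by the driver): how
 * an application reaches i40e_hash_filter_inset_select() above through
 * the generic filter API, restricting the RSS hash input for IPv4/UDP
 * flows to the source address only. The port id is an assumption.
 */
static inline int
i40e_example_hash_inset_select(uint8_t port_id)
{
        struct rte_eth_hash_filter_info info;

        memset(&info, 0, sizeof(info));
        info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
        info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        info.info.input_set_conf.inset_size = 1;
        info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
        info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                       RTE_ETH_FILTER_SET, &info);
}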
7854
7855 int
7856 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
7857                          struct rte_eth_input_set_conf *conf)
7858 {
7859         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7860         enum i40e_filter_pctype pctype;
7861         uint64_t input_set, inset_reg = 0;
7862         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
7863         int ret, i, num;
7864
7865         if (!hw || !conf) {
7866                 PMD_DRV_LOG(ERR, "Invalid pointer");
7867                 return -EFAULT;
7868         }
7869         if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
7870             conf->op != RTE_ETH_INPUT_SET_ADD) {
7871                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
7872                 return -EINVAL;
7873         }
7874
7875         if (!I40E_VALID_FLOW(conf->flow_type)) {
7876                 PMD_DRV_LOG(ERR, "invalid flow_type input.");
7877                 return -EINVAL;
7878         }
7879
7880         pctype = i40e_flowtype_to_pctype(conf->flow_type);
7881
7882         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
7883                                    conf->inset_size);
7884         if (ret) {
7885                 PMD_DRV_LOG(ERR, "Failed to parse input set");
7886                 return -EINVAL;
7887         }
7888         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
7889                                     input_set) != 0) {
7890                 PMD_DRV_LOG(ERR, "Invalid input set");
7891                 return -EINVAL;
7892         }
7893
7894         /* get inset value in register */
7895         inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
7896         inset_reg <<= I40E_32_BIT_WIDTH;
7897         inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
7898
7899         /* Cannot change the input set register for flex payload for FDIR;
7900          * it is configured by writing I40E_PRTQF_FD_FLXINSET
7901          * in i40e_set_flex_mask_on_pctype.
7902          */
7903         if (conf->op == RTE_ETH_INPUT_SET_SELECT)
7904                 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
7905         else
7906                 input_set |= pf->fdir.input_set[pctype];
7907         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7908                                            I40E_INSET_MASK_NUM_REG);
7909         if (num < 0)
7910                 return -EINVAL;
7911
7912         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
7913
7914         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
7915                               (uint32_t)(inset_reg & UINT32_MAX));
7916         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7917                              (uint32_t)((inset_reg >>
7918                              I40E_32_BIT_WIDTH) & UINT32_MAX));
7919
7920         for (i = 0; i < num; i++)
7921                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7922                                      mask_reg[i]);
7923         /* clear unused mask registers of the pctype */
7924         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
7925                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7926                                      0);
7927         I40E_WRITE_FLUSH(hw);
7928
7929         pf->fdir.input_set[pctype] = input_set;
7930         return 0;
7931 }
7932
7933 static int
7934 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7935 {
7936         int ret = 0;
7937
7938         if (!hw || !info) {
7939                 PMD_DRV_LOG(ERR, "Invalid pointer");
7940                 return -EFAULT;
7941         }
7942
7943         switch (info->info_type) {
7944         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7945                 i40e_get_symmetric_hash_enable_per_port(hw,
7946                                         &(info->info.enable));
7947                 break;
7948         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7949                 ret = i40e_get_hash_filter_global_config(hw,
7950                                 &(info->info.global_conf));
7951                 break;
7952         default:
7953                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7954                                                         info->info_type);
7955                 ret = -EINVAL;
7956                 break;
7957         }
7958
7959         return ret;
7960 }
7961
7962 static int
7963 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7964 {
7965         int ret = 0;
7966
7967         if (!hw || !info) {
7968                 PMD_DRV_LOG(ERR, "Invalid pointer");
7969                 return -EFAULT;
7970         }
7971
7972         switch (info->info_type) {
7973         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7974                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
7975                 break;
7976         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7977                 ret = i40e_set_hash_filter_global_config(hw,
7978                                 &(info->info.global_conf));
7979                 break;
7980         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
7981                 ret = i40e_hash_filter_inset_select(hw,
7982                                                &(info->info.input_set_conf));
7983                 break;
7984
7985         default:
7986                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7987                                                         info->info_type);
7988                 ret = -EINVAL;
7989                 break;
7990         }
7991
7992         return ret;
7993 }
7994
7995 /* Operations for hash function */
7996 static int
7997 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
7998                       enum rte_filter_op filter_op,
7999                       void *arg)
8000 {
8001         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8002         int ret = 0;
8003
8004         switch (filter_op) {
8005         case RTE_ETH_FILTER_NOP:
8006                 break;
8007         case RTE_ETH_FILTER_GET:
8008                 ret = i40e_hash_filter_get(hw,
8009                         (struct rte_eth_hash_filter_info *)arg);
8010                 break;
8011         case RTE_ETH_FILTER_SET:
8012                 ret = i40e_hash_filter_set(hw,
8013                         (struct rte_eth_hash_filter_info *)arg);
8014                 break;
8015         default:
8016                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
8017                                                                 filter_op);
8018                 ret = -ENOTSUP;
8019                 break;
8020         }
8021
8022         return ret;
8023 }
8024
8025 /*
8026  * Configure an ethertype filter, which can direct packets by filtering
8027  * on MAC address plus ether_type, or on ether_type alone.
8028  */
8029 static int
8030 i40e_ethertype_filter_set(struct i40e_pf *pf,
8031                         struct rte_eth_ethertype_filter *filter,
8032                         bool add)
8033 {
8034         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8035         struct i40e_control_filter_stats stats;
8036         uint16_t flags = 0;
8037         int ret;
8038
8039         if (filter->queue >= pf->dev_data->nb_rx_queues) {
8040                 PMD_DRV_LOG(ERR, "Invalid queue ID");
8041                 return -EINVAL;
8042         }
8043         if (filter->ether_type == ETHER_TYPE_IPv4 ||
8044                 filter->ether_type == ETHER_TYPE_IPv6) {
8045                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
8046                         " control packet filter.", filter->ether_type);
8047                 return -EINVAL;
8048         }
8049         if (filter->ether_type == ETHER_TYPE_VLAN)
8050                 PMD_DRV_LOG(WARNING, "filtering on the VLAN ether_type in the"
8051                         " first tag is not supported.");
8052
8053         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
8054                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
8055         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
8056                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
8057         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
8058
8059         memset(&stats, 0, sizeof(stats));
8060         ret = i40e_aq_add_rem_control_packet_filter(hw,
8061                         filter->mac_addr.addr_bytes,
8062                         filter->ether_type, flags,
8063                         pf->main_vsi->seid,
8064                         filter->queue, add, &stats, NULL);
8065
8066         PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
8067                          " mac_etype_used = %u, etype_used = %u,"
8068                          " mac_etype_free = %u, etype_free = %u",
8069                          ret, stats.mac_etype_used, stats.etype_used,
8070                          stats.mac_etype_free, stats.etype_free);
8071         if (ret < 0)
8072                 return -ENOSYS;
8073         return 0;
8074 }
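
/*
 * Illustrative sketch (editor's addition, not called by the driver):
 * adding an ethertype filter from an application through the generic
 * filter API, which ends up in i40e_ethertype_filter_set() above. The
 * port id, ether type and queue number are assumptions; the queue must
 * exist on the port.
 */
static inline int
i40e_example_add_ethertype_filter(uint8_t port_id)
{
        struct rte_eth_ethertype_filter filter;

        memset(&filter, 0, sizeof(filter));
        filter.ether_type = 0x88F7; /* e.g. PTP over Ethernet */
        filter.flags = 0;           /* match on ether_type only, no drop */
        filter.queue = 1;           /* steer matching packets to queue 1 */

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
                                       RTE_ETH_FILTER_ADD, &filter);
}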
8075
8076 /*
8077  * Handle operations for ethertype filter.
8078  */
8079 static int
8080 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
8081                                 enum rte_filter_op filter_op,
8082                                 void *arg)
8083 {
8084         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8085         int ret = 0;
8086
8087         if (filter_op == RTE_ETH_FILTER_NOP)
8088                 return ret;
8089
8090         if (arg == NULL) {
8091                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
8092                             filter_op);
8093                 return -EINVAL;
8094         }
8095
8096         switch (filter_op) {
8097         case RTE_ETH_FILTER_ADD:
8098                 ret = i40e_ethertype_filter_set(pf,
8099                         (struct rte_eth_ethertype_filter *)arg,
8100                         TRUE);
8101                 break;
8102         case RTE_ETH_FILTER_DELETE:
8103                 ret = i40e_ethertype_filter_set(pf,
8104                         (struct rte_eth_ethertype_filter *)arg,
8105                         FALSE);
8106                 break;
8107         default:
8108                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
8109                 ret = -ENOSYS;
8110                 break;
8111         }
8112         return ret;
8113 }
8114
8115 static int
8116 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
8117                      enum rte_filter_type filter_type,
8118                      enum rte_filter_op filter_op,
8119                      void *arg)
8120 {
8121         int ret = 0;
8122
8123         if (dev == NULL)
8124                 return -EINVAL;
8125
8126         switch (filter_type) {
8127         case RTE_ETH_FILTER_NONE:
8128                 /* For global configuration */
8129                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
8130                 break;
8131         case RTE_ETH_FILTER_HASH:
8132                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
8133                 break;
8134         case RTE_ETH_FILTER_MACVLAN:
8135                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
8136                 break;
8137         case RTE_ETH_FILTER_ETHERTYPE:
8138                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
8139                 break;
8140         case RTE_ETH_FILTER_TUNNEL:
8141                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
8142                 break;
8143         case RTE_ETH_FILTER_FDIR:
8144                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
8145                 break;
8146         default:
8147                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
8148                                                         filter_type);
8149                 ret = -EINVAL;
8150                 break;
8151         }
8152
8153         return ret;
8154 }
8155
8156 /*
8157  * Check and enable Extended Tag.
8158  * Enabling Extended Tag is important for 40G performance.
8159  */
8160 static void
8161 i40e_enable_extended_tag(struct rte_eth_dev *dev)
8162 {
8163         uint32_t buf = 0;
8164         int ret;
8165
8166         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
8167                                       PCI_DEV_CAP_REG);
8168         if (ret < 0) {
8169                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
8170                             PCI_DEV_CAP_REG);
8171                 return;
8172         }
8173         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
8174                 PMD_DRV_LOG(ERR, "Extended Tag is not supported");
8175                 return;
8176         }
8177
8178         buf = 0;
8179         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
8180                                       PCI_DEV_CTRL_REG);
8181         if (ret < 0) {
8182                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
8183                             PCI_DEV_CTRL_REG);
8184                 return;
8185         }
8186         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
8187                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
8188                 return;
8189         }
8190         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
8191         ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
8192                                        PCI_DEV_CTRL_REG);
8193         if (ret < 0) {
8194                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
8195                             PCI_DEV_CTRL_REG);
8196                 return;
8197         }
8198 }
8199
8200 /*
8201  * Since some registers are only reset by a global hardware reset,
8202  * hardware initialization is needed to put those registers into an
8203  * expected initial state.
8204  */
8205 static void
8206 i40e_hw_init(struct rte_eth_dev *dev)
8207 {
8208         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8209
8210         i40e_enable_extended_tag(dev);
8211
8212         /* clear the PF Queue Filter control register */
8213         i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
8214
8215         /* Disable symmetric hash per port */
8216         i40e_set_symmetric_hash_enable_per_port(hw, 0);
8217 }
8218
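/*
 * Translate an rte_eth flow type into the hardware packet classifier
 * type (pctype). Flow types without a table entry map to 0.
 */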
8219 enum i40e_filter_pctype
8220 i40e_flowtype_to_pctype(uint16_t flow_type)
8221 {
8222         static const enum i40e_filter_pctype pctype_table[] = {
8223                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
8224                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
8225                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8226                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
8227                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8228                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
8229                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8230                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
8231                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8232                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
8233                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
8234                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8235                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
8236                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8237                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
8238                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8239                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
8240                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8241                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
8242         };
8243
8244         return pctype_table[flow_type];
8245 }
8246
8247 uint16_t
8248 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
8249 {
8250         static const uint16_t flowtype_table[] = {
8251                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
8252                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8253                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8254 #ifdef X722_SUPPORT
8255                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8256                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8257                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8258                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8259 #endif
8260                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8261                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
8262 #ifdef X722_SUPPORT
8263                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8264                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
8265 #endif
8266                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8267                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
8268                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8269                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
8270                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
8271                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8272                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8273 #ifdef X722_SUPPORT
8274                 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8275                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8276                 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8277                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8278 #endif
8279                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8280                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
8281 #ifdef X722_SUPPORT
8282                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8283                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
8284 #endif
8285                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8286                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
8287                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8288                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
8289                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
8290         };
8291
8292         return flowtype_table[pctype];
8293 }
8294
8295 /*
8296  * On X710, performance falls far short of expectations on recent firmware
8297  * versions. On XL710, performance also falls far short on recent firmware
8298  * versions if promiscuous mode is disabled, or if promiscuous mode is
8299  * enabled and the port MAC address equals the packet's destination MAC
8300  * address. The fix for this issue may not be integrated in the next
8301  * firmware release, so a workaround in the software driver is needed: it
8302  * modifies the initial values of 3 internal-only registers for both X710
8303  * and XL710. Note that the values for X710 and XL710 could differ, and the
8304  * workaround can be removed once it is fixed in firmware.
8305  */
8306
8307 /* For both X710 and XL710 */
8308 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
8309 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
8310
8311 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
8312 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
8313
8314 /* For X710 */
8315 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
8316 /* For XL710 */
8317 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
8318 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
8319
8320 static int
8321 i40e_dev_sync_phy_type(struct i40e_hw *hw)
8322 {
8323         enum i40e_status_code status;
8324         struct i40e_aq_get_phy_abilities_resp phy_ab;
8325         int ret = -ENOTSUP;
8326
8327         status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
8328                                               NULL);
8329
8330         if (status)
8331                 return ret;
8332
8333         return 0;
8334 }
8335
8336
8337 static void
8338 i40e_configure_registers(struct i40e_hw *hw)
8339 {
8340         static struct {
8341                 uint32_t addr;
8342                 uint64_t val;
8343         } reg_table[] = {
8344                 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
8345                 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
8346                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
8347         };
8348         uint64_t reg;
8349         uint32_t i;
8350         int ret;
8351
8352         for (i = 0; i < RTE_DIM(reg_table); i++) {
8353                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
8354                         if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
8355                             I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
8356                                 reg_table[i].val =
8357                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
8358                         else /* For X710 */
8359                                 reg_table[i].val =
8360                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
8361                 }
8362
8363                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
8364                                                         &reg, NULL);
8365                 if (ret < 0) {
8366                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
8367                                                         reg_table[i].addr);
8368                         break;
8369                 }
8370                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
8371                                                 reg_table[i].addr, reg);
8372                 if (reg == reg_table[i].val)
8373                         continue;
8374
8375                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
8376                                                 reg_table[i].val, NULL);
8377                 if (ret < 0) {
8378                         PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
8379                                 "address of 0x%"PRIx32, reg_table[i].val,
8380                                                         reg_table[i].addr);
8381                         break;
8382                 }
8383                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
8384                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
8385         }
8386 }
8387
8388 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
8389 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
8390 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
8391 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
8392 static int
8393 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
8394 {
8395         uint32_t reg;
8396         int ret;
8397
8398         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
8399                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
8400                 return -EINVAL;
8401         }
8402
8403         /* Configure for double VLAN RX stripping */
8404         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
8405         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
8406                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
8407                 ret = i40e_aq_debug_write_register(hw,
8408                                                    I40E_VSI_TSR(vsi->vsi_id),
8409                                                    reg, NULL);
8410                 if (ret < 0) {
8411                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
8412                                     vsi->vsi_id);
8413                         return I40E_ERR_CONFIG;
8414                 }
8415         }
8416
8417         /* Configure for double VLAN TX insertion */
8418         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
8419         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
8420                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
8421                 ret = i40e_aq_debug_write_register(hw,
8422                                                    I40E_VSI_L2TAGSTXVALID(
8423                                                    vsi->vsi_id), reg, NULL);
8424                 if (ret < 0) {
8425                         PMD_DRV_LOG(ERR, "Failed to update "
8426                                 "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
8427                         return I40E_ERR_CONFIG;
8428                 }
8429         }
8430
8431         return 0;
8432 }
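
/*
 * Illustrative sketch (editor's addition, not called by the driver): an
 * application requests double VLAN (QinQ) handling through the port
 * configuration; the i40e PMD is then expected to program the VSI
 * registers shown in i40e_config_qinq() above during VSI setup. The
 * port id and the single RX/TX queue are assumptions.
 */
static inline int
i40e_example_enable_qinq(uint8_t port_id)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.hw_vlan_strip = 1;  /* strip the outer tag on RX */
        conf.rxmode.hw_vlan_extend = 1; /* enable extended (double) VLAN */

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}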
8433
8434 /**
8435  * i40e_aq_add_mirror_rule
8436  * @hw: pointer to the hardware structure
8437  * @seid: VEB seid to add mirror rule to
8438  * @dst_id: destination vsi seid
8439  * @entries: Buffer which contains the entities to be mirrored
8440  * @count: number of entities contained in the buffer
8441  * @rule_id: the rule ID of the rule to be added
8442  *
8443  * Add a mirror rule for a given VEB.
8444  *
8445  **/
8446 static enum i40e_status_code
8447 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
8448                         uint16_t seid, uint16_t dst_id,
8449                         uint16_t rule_type, uint16_t *entries,
8450                         uint16_t count, uint16_t *rule_id)
8451 {
8452         struct i40e_aq_desc desc;
8453         struct i40e_aqc_add_delete_mirror_rule cmd;
8454         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
8455                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
8456                 &desc.params.raw;
8457         uint16_t buff_len;
8458         enum i40e_status_code status;
8459
8460         i40e_fill_default_direct_cmd_desc(&desc,
8461                                           i40e_aqc_opc_add_mirror_rule);
8462         memset(&cmd, 0, sizeof(cmd));
8463
8464         buff_len = sizeof(uint16_t) * count;
8465         desc.datalen = rte_cpu_to_le_16(buff_len);
8466         if (buff_len > 0)
8467                 desc.flags |= rte_cpu_to_le_16(
8468                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
8469         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
8470                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
8471         cmd.num_entries = rte_cpu_to_le_16(count);
8472         cmd.seid = rte_cpu_to_le_16(seid);
8473         cmd.destination = rte_cpu_to_le_16(dst_id);
8474
8475         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
8476         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
8477         PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
8478                          " rule_id = %u,"
8479                          " mirror_rules_used = %u, mirror_rules_free = %u",
8480                          hw->aq.asq_last_status, resp->rule_id,
8481                          resp->mirror_rules_used, resp->mirror_rules_free);
8482         *rule_id = rte_le_to_cpu_16(resp->rule_id);
8483
8484         return status;
8485 }
8486
8487 /**
8488  * i40e_aq_del_mirror_rule
8489  * @hw: pointer to the hardware structure
8490  * @seid: VEB seid to delete mirror rule from
8491  * @entries: Buffer which contains the entities to be mirrored
8492  * @count: number of entities contained in the buffer
8493  * @rule_id: the rule ID of the rule to be deleted
8494  *
8495  * Delete a mirror rule for a given VEB.
8496  *
8497  **/
8498 static enum i40e_status_code
8499 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
8500                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
8501                 uint16_t count, uint16_t rule_id)
8502 {
8503         struct i40e_aq_desc desc;
8504         struct i40e_aqc_add_delete_mirror_rule cmd;
8505         uint16_t buff_len = 0;
8506         enum i40e_status_code status;
8507         void *buff = NULL;
8508
8509         i40e_fill_default_direct_cmd_desc(&desc,
8510                                           i40e_aqc_opc_delete_mirror_rule);
8511         memset(&cmd, 0, sizeof(cmd));
8512         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
8513                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
8514                                                           I40E_AQ_FLAG_RD));
8515                 cmd.num_entries = count;
8516                 buff_len = sizeof(uint16_t) * count;
8517                 desc.datalen = rte_cpu_to_le_16(buff_len);
8518                 buff = (void *)entries;
8519         } else
8520                 /* rule id is filled in destination field for deleting mirror rule */
8521                 cmd.destination = rte_cpu_to_le_16(rule_id);
8522
8523         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
8524                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
8525         cmd.seid = rte_cpu_to_le_16(seid);
8526
8527         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
8528         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
8529
8530         return status;
8531 }
8532
8533 /**
8534  * i40e_mirror_rule_set
8535  * @dev: pointer to the rte_eth_dev structure
8536  * @mirror_conf: mirror rule info
8537  * @sw_id: mirror rule's sw_id
8538  * @on: enable/disable
8539  *
8540  * Set a mirror rule.
8541  *
8542  **/
8543 static int
8544 i40e_mirror_rule_set(struct rte_eth_dev *dev,
8545                         struct rte_eth_mirror_conf *mirror_conf,
8546                         uint8_t sw_id, uint8_t on)
8547 {
8548         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8549         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8550         struct i40e_mirror_rule *it, *mirr_rule = NULL;
8551         struct i40e_mirror_rule *parent = NULL;
8552         uint16_t seid, dst_seid, rule_id;
8553         uint16_t i, j = 0;
8554         int ret;
8555
8556         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
8557
8558         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
8559                 PMD_DRV_LOG(ERR, "mirror rule cannot be configured"
8560                         " without VEB or VFs.");
8561                 return -ENOSYS;
8562         }
8563         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
8564                 PMD_DRV_LOG(ERR, "mirror table is full.");
8565                 return -ENOSPC;
8566         }
8567         if (mirror_conf->dst_pool > pf->vf_num) {
8568                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
8569                                  mirror_conf->dst_pool);
8570                 return -EINVAL;
8571         }
8572
8573         seid = pf->main_vsi->veb->seid;
8574
8575         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
8576                 if (sw_id <= it->index) {
8577                         mirr_rule = it;
8578                         break;
8579                 }
8580                 parent = it;
8581         }
8582         if (mirr_rule && sw_id == mirr_rule->index) {
8583                 if (on) {
8584                         PMD_DRV_LOG(ERR, "mirror rule exists.");
8585                         return -EEXIST;
8586                 } else {
8587                         ret = i40e_aq_del_mirror_rule(hw, seid,
8588                                         mirr_rule->rule_type,
8589                                         mirr_rule->entries,
8590                                         mirr_rule->num_entries, mirr_rule->id);
8591                         if (ret < 0) {
8592                                 PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
8593                                                    " ret = %d, aq_err = %d.",
8594                                                    ret, hw->aq.asq_last_status);
8595                                 return -ENOSYS;
8596                         }
8597                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
8598                         rte_free(mirr_rule);
8599                         pf->nb_mirror_rule--;
8600                         return 0;
8601                 }
8602         } else if (!on) {
8603                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
8604                 return -ENOENT;
8605         }
8606
8607         mirr_rule = rte_zmalloc("i40e_mirror_rule",
8608                                 sizeof(struct i40e_mirror_rule), 0);
8609         if (!mirr_rule) {
8610                 PMD_DRV_LOG(ERR, "failed to allocate memory");
8611                 return I40E_ERR_NO_MEMORY;
8612         }
8613         switch (mirror_conf->rule_type) {
8614         case ETH_MIRROR_VLAN:
8615                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
8616                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
8617                                 mirr_rule->entries[j] =
8618                                         mirror_conf->vlan.vlan_id[i];
8619                                 j++;
8620                         }
8621                 }
8622                 if (j == 0) {
8623                         PMD_DRV_LOG(ERR, "vlan is not specified.");
8624                         rte_free(mirr_rule);
8625                         return -EINVAL;
8626                 }
8627                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
8628                 break;
8629         case ETH_MIRROR_VIRTUAL_POOL_UP:
8630         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
8631                 /* check if the specified pool bit is out of range */
8632                 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
8633                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
8634                         rte_free(mirr_rule);
8635                         return -EINVAL;
8636                 }
8637                 for (i = 0, j = 0; i < pf->vf_num; i++) {
8638                         if (mirror_conf->pool_mask & (1ULL << i)) {
8639                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
8640                                 j++;
8641                         }
8642                 }
8643                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
8644                         /* add pf vsi to entries */
8645                         mirr_rule->entries[j] = pf->main_vsi_seid;
8646                         j++;
8647                 }
8648                 if (j == 0) {
8649                         PMD_DRV_LOG(ERR, "pool is not specified.");
8650                         rte_free(mirr_rule);
8651                         return -EINVAL;
8652                 }
8653                 /* egress and ingress in AQ commands mean from the switch, not the port */
8654                 mirr_rule->rule_type =
8655                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
8656                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
8657                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
8658                 break;
8659         case ETH_MIRROR_UPLINK_PORT:
8660                 /* egress and ingress in AQ commands mean from the switch, not the port */
8661                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
8662                 break;
8663         case ETH_MIRROR_DOWNLINK_PORT:
8664                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
8665                 break;
8666         default:
8667                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
8668                         mirror_conf->rule_type);
8669                 rte_free(mirr_rule);
8670                 return -EINVAL;
8671         }
8672
8673         /* If the dst_pool is equal to vf_num, consider it as PF */
8674         if (mirror_conf->dst_pool == pf->vf_num)
8675                 dst_seid = pf->main_vsi_seid;
8676         else
8677                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
8678
8679         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
8680                                       mirr_rule->rule_type, mirr_rule->entries,
8681                                       j, &rule_id);
8682         if (ret < 0) {
8683                 PMD_DRV_LOG(ERR, "failed to add mirror rule:"
8684                                    " ret = %d, aq_err = %d.",
8685                                    ret, hw->aq.asq_last_status);
8686                 rte_free(mirr_rule);
8687                 return -ENOSYS;
8688         }
8689
8690         mirr_rule->index = sw_id;
8691         mirr_rule->num_entries = j;
8692         mirr_rule->id = rule_id;
8693         mirr_rule->dst_vsi_seid = dst_seid;
8694
8695         if (parent)
8696                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
8697         else
8698                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
8699
8700         pf->nb_mirror_rule++;
8701         return 0;
8702 }
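
/*
 * Illustrative sketch (editor's addition, not called by the driver): an
 * application installs a VLAN mirror rule through the ethdev API, which
 * lands in i40e_mirror_rule_set() above. Port id, rule id, VLAN id and
 * destination pool are assumptions; the port needs a VEB and at least
 * one VF, as checked above.
 */
static inline int
i40e_example_mirror_vlan(uint8_t port_id)
{
        struct rte_eth_mirror_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rule_type = ETH_MIRROR_VLAN;
        conf.dst_pool = 0;          /* mirror into VF pool 0 */
        conf.vlan.vlan_mask = 1ULL; /* one VLAN id, in slot 0 */
        conf.vlan.vlan_id[0] = 100;

        /* rule id 0 is the sw_id used as the list key above */
        return rte_eth_mirror_rule_set(port_id, &conf, 0, 1 /* on */);
}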
8703
8704 /**
8705  * i40e_mirror_rule_reset
8706  * @dev: pointer to the device
8707  * @sw_id: mirror rule's sw_id
8708  *
8709  * Reset a mirror rule.
8710  *
8711  **/
8712 static int
8713 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
8714 {
8715         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8716         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8717         struct i40e_mirror_rule *it, *mirr_rule = NULL;
8718         uint16_t seid;
8719         int ret;
8720
8721         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
8722
8723         seid = pf->main_vsi->veb->seid;
8724
8725         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
8726                 if (sw_id == it->index) {
8727                         mirr_rule = it;
8728                         break;
8729                 }
8730         }
8731         if (mirr_rule) {
8732                 ret = i40e_aq_del_mirror_rule(hw, seid,
8733                                 mirr_rule->rule_type,
8734                                 mirr_rule->entries,
8735                                 mirr_rule->num_entries, mirr_rule->id);
8736                 if (ret < 0) {
8737                         PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
8738                                            " status = %d, aq_err = %d.",
8739                                            ret, hw->aq.asq_last_status);
8740                         return -ENOSYS;
8741                 }
8742                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
8743                 rte_free(mirr_rule);
8744                 pf->nb_mirror_rule--;
8745         } else {
8746                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
8747                 return -ENOENT;
8748         }
8749         return 0;
8750 }
8751
8752 static uint64_t
8753 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
8754 {
8755         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8756         uint64_t systim_cycles;
8757
8758         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
8759         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
8760                         << 32;
8761
8762         return systim_cycles;
8763 }
8764
8765 static uint64_t
8766 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
8767 {
8768         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8769         uint64_t rx_tstamp;
8770
8771         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
8772         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
8773                         << 32;
8774
8775         return rx_tstamp;
8776 }
8777
8778 static uint64_t
8779 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
8780 {
8781         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8782         uint64_t tx_tstamp;
8783
8784         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
8785         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
8786                         << 32;
8787
8788         return tx_tstamp;
8789 }
8790
8791 static void
8792 i40e_start_timecounters(struct rte_eth_dev *dev)
8793 {
8794         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8795         struct i40e_adapter *adapter =
8796                         (struct i40e_adapter *)dev->data->dev_private;
8797         struct rte_eth_link link;
8798         uint32_t tsync_inc_l;
8799         uint32_t tsync_inc_h;
8800
8801         /* Get current link speed. */
8802         memset(&link, 0, sizeof(link));
8803         i40e_dev_link_update(dev, 1);
8804         rte_i40e_dev_atomic_read_link_status(dev, &link);
8805
8806         switch (link.link_speed) {
8807         case ETH_SPEED_NUM_40G:
8808                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
8809                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
8810                 break;
8811         case ETH_SPEED_NUM_10G:
8812                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
8813                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
8814                 break;
8815         case ETH_SPEED_NUM_1G:
8816                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
8817                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
8818                 break;
8819         default:
8820                 tsync_inc_l = 0x0;
8821                 tsync_inc_h = 0x0;
8822         }
8823
8824         /* Set the timesync increment value. */
8825         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
8826         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
8827
8828         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
8829         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
8830         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
8831
8832         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8833         adapter->systime_tc.cc_shift = 0;
8834         adapter->systime_tc.nsec_mask = 0;
8835
8836         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8837         adapter->rx_tstamp_tc.cc_shift = 0;
8838         adapter->rx_tstamp_tc.nsec_mask = 0;
8839
8840         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8841         adapter->tx_tstamp_tc.cc_shift = 0;
8842         adapter->tx_tstamp_tc.nsec_mask = 0;
8843 }
8844
8845 static int
8846 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
8847 {
8848         struct i40e_adapter *adapter =
8849                         (struct i40e_adapter *)dev->data->dev_private;
8850
8851         adapter->systime_tc.nsec += delta;
8852         adapter->rx_tstamp_tc.nsec += delta;
8853         adapter->tx_tstamp_tc.nsec += delta;
8854
8855         return 0;
8856 }
8857
8858 static int
8859 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
8860 {
8861         uint64_t ns;
8862         struct i40e_adapter *adapter =
8863                         (struct i40e_adapter *)dev->data->dev_private;
8864
8865         ns = rte_timespec_to_ns(ts);
8866
8867         /* Set the timecounters to a new value. */
8868         adapter->systime_tc.nsec = ns;
8869         adapter->rx_tstamp_tc.nsec = ns;
8870         adapter->tx_tstamp_tc.nsec = ns;
8871
8872         return 0;
8873 }
8874
8875 static int
8876 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
8877 {
8878         uint64_t ns, systime_cycles;
8879         struct i40e_adapter *adapter =
8880                         (struct i40e_adapter *)dev->data->dev_private;
8881
8882         systime_cycles = i40e_read_systime_cyclecounter(dev);
8883         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
8884         *ts = rte_ns_to_timespec(ns);
8885
8886         return 0;
8887 }
8888
8889 static int
8890 i40e_timesync_enable(struct rte_eth_dev *dev)
8891 {
8892         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8893         uint32_t tsync_ctl_l;
8894         uint32_t tsync_ctl_h;
8895
8896         /* Stop the timesync system time. */
8897         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
8898         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
8899         /* Reset the timesync system time value. */
8900         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
8901         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
8902
8903         i40e_start_timecounters(dev);
8904
8905         /* Clear timesync registers. */
8906         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
8907         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
8908         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
8909         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
8910         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
8911         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
8912
8913         /* Enable timestamping of PTP packets. */
8914         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
8915         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
8916
8917         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
8918         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
8919         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
8920
8921         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8922         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8923
8924         return 0;
8925 }
8926
8927 static int
8928 i40e_timesync_disable(struct rte_eth_dev *dev)
8929 {
8930         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8931         uint32_t tsync_ctl_l;
8932         uint32_t tsync_ctl_h;
8933
8934         /* Disable timestamping of transmitted PTP packets. */
8935         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
8936         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
8937
8938         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
8939         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
8940
8941         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8942         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8943
8944         /* Reset the timesync increment value. */
8945         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
8946         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
8947
8948         return 0;
8949 }
8950
8951 static int
8952 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
8953                                 struct timespec *timestamp, uint32_t flags)
8954 {
8955         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8956         struct i40e_adapter *adapter =
8957                 (struct i40e_adapter *)dev->data->dev_private;
8958
8959         uint32_t sync_status;
8960         uint32_t index = flags & 0x03;
8961         uint64_t rx_tstamp_cycles;
8962         uint64_t ns;
8963
8964         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
8965         if ((sync_status & (1 << index)) == 0)
8966                 return -EINVAL;
8967
8968         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
8969         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
8970         *timestamp = rte_ns_to_timespec(ns);
8971
8972         return 0;
8973 }
8974
8975 static int
8976 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
8977                                 struct timespec *timestamp)
8978 {
8979         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8980         struct i40e_adapter *adapter =
8981                 (struct i40e_adapter *)dev->data->dev_private;
8982
8983         uint32_t sync_status;
8984         uint64_t tx_tstamp_cycles;
8985         uint64_t ns;
8986
8987         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
8988         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
8989                 return -EINVAL;
8990
8991         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
8992         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
8993         *timestamp = rte_ns_to_timespec(ns);
8994
8995         return 0;
8996 }
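
/*
 * Illustrative sketch (editor's addition, not called by the driver): the
 * PTP flow an application would drive against the timesync ops above.
 * The port id and the 1 us clock step are assumptions; error handling is
 * elided for brevity.
 */
static inline void
i40e_example_ptp_usage(uint8_t port_id)
{
        struct timespec ts;

        rte_eth_timesync_enable(port_id);         /* i40e_timesync_enable() */
        rte_eth_timesync_read_time(port_id, &ts); /* i40e_timesync_read_time() */
        /* step the clock forward by 1 us, e.g. after offset calculation */
        rte_eth_timesync_adjust_time(port_id, 1000);
        rte_eth_timesync_disable(port_id);
}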
8997
8998 /*
8999  * i40e_parse_dcb_configure - parse the DCB configuration from the user
9000  * @dev: the device being configured
9001  * @dcb_cfg: pointer to the parsed configuration
9002  * @tc_map: bit map of enabled traffic classes
9003  *
9004  * Returns 0 on success, negative value on failure
9005  */
9006 static int
9007 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
9008                          struct i40e_dcbx_config *dcb_cfg,
9009                          uint8_t *tc_map)
9010 {
9011         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
9012         uint8_t i, tc_bw, bw_lf;
9013
9014         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
9015
9016         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
9017         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
9018                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
9019                 return -EINVAL;
9020         }
9021
9022         /* assume each tc has the same bw */
9023         tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
9024         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9025                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
9026         /* distribute the remainder so the TC bandwidths sum to 100 */
9027         bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
9028         for (i = 0; i < bw_lf; i++)
9029                 dcb_cfg->etscfg.tcbwtable[i]++;
9030
9031         /* assume each tc has the same Transmission Selection Algorithm */
9032         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
9033                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
9034
9035         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
9036                 dcb_cfg->etscfg.prioritytable[i] =
9037                                 dcb_rx_conf->dcb_tc[i];
9038
9039         /* FW needs one App to configure HW */
9040         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
9041         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
9042         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
9043         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
9044
9045         if (dcb_rx_conf->nb_tcs == 0)
9046                 *tc_map = 1; /* tc0 only */
9047         else
9048                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
9049
9050         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
9051                 dcb_cfg->pfc.willing = 0;
9052                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
9053                 dcb_cfg->pfc.pfcenable = *tc_map;
9054         }
9055         return 0;
9056 }
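
/*
 * Illustrative sketch (editor's addition, not called by the driver): the
 * application-side configuration that i40e_parse_dcb_configure() consumes.
 * With ETH_8_TCS the equal-share split above gives 100 / 8 = 12 percent
 * per TC with a remainder of 4, so TCs 0-3 get 13 percent and TCs 4-7 get
 * 12 percent (sum = 100). The priority-to-TC mapping is an assumption.
 */
static inline void
i40e_example_dcb_conf(struct rte_eth_conf *conf)
{
        uint8_t i;

        memset(conf, 0, sizeof(*conf));
        conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
        conf->rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_8_TCS;
        /* map user priority i to TC i */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                conf->rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i;
        /* optionally request PFC as well, as checked above */
        conf->dcb_capability_en = ETH_DCB_PFC_SUPPORT;
}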
9057
9058
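/*
 * Update the queue mapping in the VSI context for the given bitmap of
 * enabled TCs, spreading the VSI's usable queues evenly across them
 * (e.g. 16 usable queues and 4 enabled TCs give qpnum_per_tc = 4).
 */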
9059 static enum i40e_status_code
9060 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
9061                               struct i40e_aqc_vsi_properties_data *info,
9062                               uint8_t enabled_tcmap)
9063 {
9064         enum i40e_status_code ret;
9065         int i, total_tc = 0;
9066         uint16_t qpnum_per_tc, bsf, qp_idx;
9067         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
9068         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
9069         uint16_t used_queues;
9070
9071         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
9072         if (ret != I40E_SUCCESS)
9073                 return ret;
9074
9075         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9076                 if (enabled_tcmap & (1 << i))
9077                         total_tc++;
9078         }
9079         if (total_tc == 0)
9080                 total_tc = 1;
9081         vsi->enabled_tc = enabled_tcmap;
9082
9083         /* different VSI types have different numbers of queues assigned */
9084         if (vsi->type == I40E_VSI_MAIN)
9085                 used_queues = dev_data->nb_rx_queues -
9086                         pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
9087         else if (vsi->type == I40E_VSI_VMDQ2)
9088                 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
9089         else {
9090                 PMD_INIT_LOG(ERR, "unsupported VSI type.");
9091                 return I40E_ERR_NO_AVAILABLE_VSI;
9092         }
9093
9094         /* Number of queues per enabled TC */
9095         qpnum_per_tc = used_queues / total_tc;
9096         if (qpnum_per_tc == 0) {
9097                 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
9098                 return I40E_ERR_INVALID_QP_ID;
9099         }
9100         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
9101                                 I40E_MAX_Q_PER_TC);
9102         bsf = rte_bsf32(qpnum_per_tc);
9103
9104         /**
9105          * Configure TC and queue mapping parameters: for each enabled TC,
9106          * allocate qpnum_per_tc queues to its traffic; disabled TCs are
9107          * served by the default queue.
9108          */
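             /*
              * Worked example: with qpnum_per_tc = 4 (bsf = 2) and TC0/TC1
              * enabled, TC0 maps queues 0-3 and TC1 maps queues 4-7; each
              * tc_mapping entry packs the queue offset and bsf into one
              * little-endian 16-bit word.
              */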
9109         qp_idx = 0;
9110         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9111                 if (vsi->enabled_tc & (1 << i)) {
9112                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
9113                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
9114                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
9115                         qp_idx += qpnum_per_tc;
9116                 } else
9117                         info->tc_mapping[i] = 0;
9118         }
9119
9120         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
9121         if (vsi->type == I40E_VSI_SRIOV) {
9122                 info->mapping_flags |=
9123                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
9124                 for (i = 0; i < vsi->nb_qps; i++)
9125                         info->queue_mapping[i] =
9126                                 rte_cpu_to_le_16(vsi->base_queue + i);
9127         } else {
9128                 info->mapping_flags |=
9129                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
9130                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
9131         }
9132         info->valid_sections |=
9133                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
9134
9135         return I40E_SUCCESS;
9136 }
9137
9138 /*
9139  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
9140  * @veb: VEB to be configured
9141  * @tc_map: enabled TC bitmap
9142  *
9143  * Returns 0 on success, negative value on failure
9144  */
9145 static enum i40e_status_code
9146 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
9147 {
9148         struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
9149         struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
9150         struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
9151         struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
9152         enum i40e_status_code ret = I40E_SUCCESS;
9153         int i;
9154         uint32_t bw_max;
9155
9156         /* Nothing to do if the enabled TC map is unchanged */
9157         if (veb->enabled_tc == tc_map)
9158                 return ret;
9159
9160         /* configure tc bandwidth */
9161         memset(&veb_bw, 0, sizeof(veb_bw));
9162         veb_bw.tc_valid_bits = tc_map;
9163         /* Enable ETS TCs with equal BW Share for now across all VSIs */
9164         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9165                 if (tc_map & BIT_ULL(i))
9166                         veb_bw.tc_bw_share_credits[i] = 1;
9167         }
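             /* One credit per enabled TC gives every TC an equal relative
              * weight.
              */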
9168         ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
9169                                                    &veb_bw, NULL);
9170         if (ret) {
9171                 PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
9172                                   " per TC failed = %d",
9173                                   hw->aq.asq_last_status);
9174                 return ret;
9175         }
9176
9177         memset(&ets_query, 0, sizeof(ets_query));
9178         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9179                                                    &ets_query, NULL);
9180         if (ret != I40E_SUCCESS) {
9181                 PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
9182                                  " configuration %u", hw->aq.asq_last_status);
9183                 return ret;
9184         }
9185         memset(&bw_query, 0, sizeof(bw_query));
9186         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9187                                                   &bw_query, NULL);
9188         if (ret != I40E_SUCCESS) {
9189                 PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
9190                                  " configuration %u", hw->aq.asq_last_status);
9191                 return ret;
9192         }
9193
9194         /* store and print out BW info */
9195         veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
9196         veb->bw_info.bw_max = ets_query.tc_bw_max;
9197         PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
9198         PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
9199         bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
9200                     (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
9201                      I40E_16_BIT_WIDTH);
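             /*
              * bw_query.tc_bw_max is two little-endian 16-bit words combined
              * into one 32-bit value above; the loop below extracts one
              * 4-bit slot per TC (low 3 bits used, top bit reserved).
              */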
9202         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9203                 veb->bw_info.bw_ets_share_credits[i] =
9204                                 bw_query.tc_bw_share_credits[i];
9205                 veb->bw_info.bw_ets_credits[i] =
9206                                 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
9207                 /* 4 bits per TC, 4th bit is reserved */
9208                 veb->bw_info.bw_ets_max[i] =
9209                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
9210                                   RTE_LEN2MASK(3, uint8_t));
9211                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
9212                             veb->bw_info.bw_ets_share_credits[i]);
9213                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
9214                             veb->bw_info.bw_ets_credits[i]);
9215                 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
9216                             veb->bw_info.bw_ets_max[i]);
9217         }
9218
9219         veb->enabled_tc = tc_map;
9220
9221         return ret;
9222 }
9223
9224
9225 /*
9226  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
9227  * @vsi: VSI to be configured
9228  * @tc_map: enabled TC bitmap
9229  *
9230  * Returns 0 on success, negative value on failure
9231  */
9232 static enum i40e_status_code
9233 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
9234 {
9235         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
9236         struct i40e_vsi_context ctxt;
9237         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
9238         enum i40e_status_code ret = I40E_SUCCESS;
9239         int i;
9240
9241         /* Nothing to do if the enabled TC map is unchanged */
9242         if (vsi->enabled_tc == tc_map)
9243                 return ret;
9244
9245         /* configure tc bandwidth */
9246         memset(&bw_data, 0, sizeof(bw_data));
9247         bw_data.tc_valid_bits = tc_map;
9248         /* Enable ETS TCs with equal BW Share for now across all VSIs */
9249         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9250                 if (tc_map & BIT_ULL(i))
9251                         bw_data.tc_bw_credits[i] = 1;
9252         }
9253         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
9254         if (ret) {
9255                 PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
9256                         " per TC failed = %d",
9257                         hw->aq.asq_last_status);
9258                 goto out;
9259         }
9260         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
9261                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
9262
9263         /* Update Queue Pairs Mapping for currently enabled UPs */
9264         ctxt.seid = vsi->seid;
9265         ctxt.pf_num = hw->pf_id;
9266         ctxt.vf_num = 0;
9267         ctxt.uplink_seid = vsi->uplink_seid;
9268         ctxt.info = vsi->info;
9269         i40e_get_cap(hw);
9270         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
9271         if (ret)
9272                 goto out;
9273
9274         /* Update the VSI after updating the VSI queue-mapping information */
9275         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9276         if (ret) {
9277                 PMD_INIT_LOG(ERR, "Failed to configure "
9278                             "TC queue mapping = %d",
9279                             hw->aq.asq_last_status);
9280                 goto out;
9281         }
9282         /* update the local VSI info with updated queue map */
9283         (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
9284                                         sizeof(vsi->info.tc_mapping));
9285         (void)rte_memcpy(&vsi->info.queue_mapping,
9286                         &ctxt.info.queue_mapping,
9287                 sizeof(vsi->info.queue_mapping));
9288         vsi->info.mapping_flags = ctxt.info.mapping_flags;
9289         vsi->info.valid_sections = 0;
9290
9291         /* query and update current VSI BW information */
9292         ret = i40e_vsi_get_bw_config(vsi);
9293         if (ret) {
9294                 PMD_INIT_LOG(ERR,
9295                          "Failed updating vsi bw info, err %s aq_err %s",
9296                          i40e_stat_str(hw, ret),
9297                          i40e_aq_str(hw, hw->aq.asq_last_status));
9298                 goto out;
9299         }
9300
9301         vsi->enabled_tc = tc_map;
9302
9303 out:
9304         return ret;
9305 }
9306
9307 /*
9308  * i40e_dcb_hw_configure - program the dcb setting to hw
9309  * @pf: pf the configuration is taken on
9310  * @new_cfg: new configuration
9311  * @tc_map: enabled TC bitmap
9312  *
9313  * Returns 0 on success, negative value on failure
9314  */
9315 static enum i40e_status_code
9316 i40e_dcb_hw_configure(struct i40e_pf *pf,
9317                       struct i40e_dcbx_config *new_cfg,
9318                       uint8_t tc_map)
9319 {
9320         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9321         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
9322         struct i40e_vsi *main_vsi = pf->main_vsi;
9323         struct i40e_vsi_list *vsi_list;
9324         enum i40e_status_code ret;
9325         int i;
9326         uint32_t val;
9327
9328         /* Use the FW LLDP API only if FW >= v4.4 */
9329         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
9330               (hw->aq.fw_maj_ver >= 5))) {
9331                 PMD_INIT_LOG(ERR, "FW < v4.4, cannot use FW LLDP API"
9332                                   " to configure DCB");
9333                 return I40E_ERR_FIRMWARE_API_VERSION;
9334         }
9335
9336         /* Check whether reconfiguration is needed */
9337         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
9338                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
9339                 return I40E_SUCCESS;
9340         }
9341
9342         /* Copy the new config to the current config */
9343         *old_cfg = *new_cfg;
9344         old_cfg->etsrec = old_cfg->etscfg;
9345         ret = i40e_set_dcb_config(hw);
9346         if (ret) {
9347                 PMD_INIT_LOG(ERR,
9348                          "Set DCB config failed, err %s aq_err %s",
9349                          i40e_stat_str(hw, ret),
9350                          i40e_aq_str(hw, hw->aq.asq_last_status));
9351                 return ret;
9352         }
9353         /* Set the Rx arbiter to RR mode with the ETS scheme by default */
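             /*
              * Each PRTDCB_RETSTCC register is updated read-modify-write:
              * clear the BWSHARE, UPINTC_MODE and ETSTC fields, then program
              * the TC's bandwidth share and re-enable ETS for that TC.
              */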
9354         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
9355                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
9356                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
9357                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
9358                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
9359                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
9360                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
9361                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
9362                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
9363                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
9364                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
9365                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
9366                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
9367         }
9368         /* get local mib to check whether it is configured correctly */
9369         /* IEEE mode */
9370         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
9371         /* Get Local DCB Config */
9372         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
9373                                      &hw->local_dcbx_config);
9374
9375         /* If a VEB has been created, update its TC configuration first */
9376         if (main_vsi->veb) {
9377                 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
9378                 if (ret)
9379                         PMD_INIT_LOG(WARNING,
9380                                  "Failed configuring TC for VEB seid=%d",
9381                                  main_vsi->veb->seid);
9382         }
9383         /* Update each VSI */
9384         i40e_vsi_config_tc(main_vsi, tc_map);
9385         if (main_vsi->veb) {
9386                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
9387                         /* Besides the main VSI and VMDQ VSIs, enable only
9388                          * the default TC for other VSIs
9389                          */
9390                         if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
9391                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
9392                                                          tc_map);
9393                         else
9394                                 ret = i40e_vsi_config_tc(vsi_list->vsi,
9395                                                          I40E_DEFAULT_TCMAP);
9396                         if (ret)
9397                                 PMD_INIT_LOG(WARNING,
9398                                          "Failed configuring TC for VSI seid=%d",
9399                                          vsi_list->vsi->seid);
9400                         /* continue */
9401                 }
9402         }
9403         return I40E_SUCCESS;
9404 }
9405
9406 /*
9407  * i40e_dcb_init_configure - initial dcb config
9408  * @dev: device being configured
9409  * @sw_dcb: indicate whether dcb is sw configured or hw offload
9410  *
9411  * Returns 0 on success, negative value on failure
9412  */
9413 static int
9414 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
9415 {
9416         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9417         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9418         int ret = 0;
9419
9420         if ((pf->flags & I40E_FLAG_DCB) == 0) {
9421                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
9422                 return -ENOTSUP;
9423         }
9424
9425         /* DCB initialization:
9426          * Update DCB configuration from the Firmware and configure
9427          * LLDP MIB change event.
9428          */
9429         if (sw_dcb == TRUE) {
9430                 ret = i40e_init_dcb(hw);
9431                 /* If the LLDP agent is stopped, i40e_init_dcb() is expected
9432                  * to fail with an I40E_AQ_RC_EPERM adminq status;
9433                  * otherwise it should succeed.
9434                  */
9435                 if (ret == I40E_SUCCESS ||
9436                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
9437                         memset(&hw->local_dcbx_config, 0,
9438                                 sizeof(struct i40e_dcbx_config));
9439                         /* set dcb default configuration */
9440                         hw->local_dcbx_config.etscfg.willing = 0;
9441                         hw->local_dcbx_config.etscfg.maxtcs = 0;
9442                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
9443                         hw->local_dcbx_config.etscfg.tsatable[0] =
9444                                                 I40E_IEEE_TSA_ETS;
9445                         hw->local_dcbx_config.etsrec =
9446                                 hw->local_dcbx_config.etscfg;
9447                         hw->local_dcbx_config.pfc.willing = 0;
9448                         hw->local_dcbx_config.pfc.pfccap =
9449                                                 I40E_MAX_TRAFFIC_CLASS;
9450                         /* FW needs one App to configure HW */
9451                         hw->local_dcbx_config.numapps = 1;
9452                         hw->local_dcbx_config.app[0].selector =
9453                                                 I40E_APP_SEL_ETHTYPE;
9454                         hw->local_dcbx_config.app[0].priority = 3;
9455                         hw->local_dcbx_config.app[0].protocolid =
9456                                                 I40E_APP_PROTOID_FCOE;
9457                         ret = i40e_set_dcb_config(hw);
9458                         if (ret) {
9459                                 PMD_INIT_LOG(ERR, "Default DCB config failed,"
9460                                         " err = %d, aq_err = %d.", ret,
9461                                         hw->aq.asq_last_status);
9462                                 return -ENOSYS;
9463                         }
9464                 } else {
9465                         PMD_INIT_LOG(ERR, "DCB initialization in FW failed,"
9466                                           " err = %d, aq_err = %d.", ret,
9467                                           hw->aq.asq_last_status);
9468                         return -ENOTSUP;
9469                 }
9470         } else {
9471                 ret = i40e_aq_start_lldp(hw, NULL);
9472                 if (ret != I40E_SUCCESS)
9473                         PMD_INIT_LOG(DEBUG, "Failed to start LLDP");
9474
9475                 ret = i40e_init_dcb(hw);
9476                 if (!ret) {
9477                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
9478                                 PMD_INIT_LOG(ERR, "HW doesn't support"
9479                                                   " DCBX offload.");
9480                                 return -ENOTSUP;
9481                         }
9482                 } else {
9483                         PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
9484                                           " aq_err = %d.", ret,
9485                                           hw->aq.asq_last_status);
9486                         return -ENOTSUP;
9487                 }
9488         }
9489         return 0;
9490 }
9491
9492 /*
9493  * i40e_dcb_setup - setup dcb related config
9494  * @dev: device being configured
9495  *
9496  * Returns 0 on success, negative value on failure
9497  */
9498 static int
9499 i40e_dcb_setup(struct rte_eth_dev *dev)
9500 {
9501         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9502         struct i40e_dcbx_config dcb_cfg;
9503         uint8_t tc_map = 0;
9504         int ret = 0;
9505
9506         if ((pf->flags & I40E_FLAG_DCB) == 0) {
9507                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
9508                 return -ENOTSUP;
9509         }
9510
9511         if (pf->vf_num != 0)
9512                 PMD_INIT_LOG(DEBUG, "DCB only works on the PF and VMDq VSIs.");
9513
9514         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
9515         if (ret) {
9516                 PMD_INIT_LOG(ERR, "Invalid DCB configuration");
9517                 return -EINVAL;
9518         }
9519         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
9520         if (ret) {
9521                 PMD_INIT_LOG(ERR, "DCB software configuration failed");
9522                 return -ENOSYS;
9523         }
9524
9525         return 0;
9526 }
9527
9528 static int
9529 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
9530                       struct rte_eth_dcb_info *dcb_info)
9531 {
9532         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9533         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9534         struct i40e_vsi *vsi = pf->main_vsi;
9535         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
9536         uint16_t bsf, tc_mapping;
9537         int i, j = 0;
9538
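             /*
              * In DCB mode enabled_tc is a contiguous low-order bitmap
              * (built with RTE_LEN2MASK), so e.g. 0x0F + 1 == 0x10 and
              * rte_bsf32() recovers 4 TCs.
              */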
9539         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
9540                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
9541         else
9542                 dcb_info->nb_tcs = 1;
9543         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
9544                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
9545         for (i = 0; i < dcb_info->nb_tcs; i++)
9546                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
9547
9548         /* Get queue mapping if VMDq is disabled */
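             /*
              * Each tc_mapping word is decoded back into a base queue index
              * and a queue count of 1 << bsf, mirroring the encoding done in
              * i40e_vsi_update_queue_mapping().
              */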
9549         if (!pf->nb_cfg_vmdq_vsi) {
9550                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9551                         if (!(vsi->enabled_tc & (1 << i)))
9552                                 continue;
9553                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
9554                         dcb_info->tc_queue.tc_rxq[j][i].base =
9555                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
9556                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
9557                         dcb_info->tc_queue.tc_txq[j][i].base =
9558                                 dcb_info->tc_queue.tc_rxq[j][i].base;
9559                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
9560                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
9561                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
9562                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
9563                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
9564                 }
9565                 return 0;
9566         }
9567
9568         /* Get queue mapping if VMDq is enabled */
9569         do {
9570                 vsi = pf->vmdq[j].vsi;
9571                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9572                         if (!(vsi->enabled_tc & (1 << i)))
9573                                 continue;
9574                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
9575                         dcb_info->tc_queue.tc_rxq[j][i].base =
9576                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
9577                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
9578                         dcb_info->tc_queue.tc_txq[j][i].base =
9579                                 dcb_info->tc_queue.tc_rxq[j][i].base;
9580                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
9581                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
9582                         dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
9583                         dcb_info->tc_queue.tc_txq[j][i].nb_queue =
9584                                 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
9585                 }
9586                 j++;
9587         } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
9588         return 0;
9589 }
9590
9591 static int
9592 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
9593 {
9594         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
9595         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9596         uint16_t interval =
9597                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
9598         uint16_t msix_intr;
9599
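             /*
              * Look up the MSI-X vector bound to this Rx queue, then re-arm
              * it through the matching interrupt dynamic control register
              * (DYN_CTL0 for the misc vector, DYN_CTLN otherwise).
              */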
9600         msix_intr = intr_handle->intr_vec[queue_id];
9601         if (msix_intr == I40E_MISC_VEC_ID)
9602                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
9603                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
9604                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
9605                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
9606                                (interval <<
9607                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
9608         else
9609                 I40E_WRITE_REG(hw,
9610                                I40E_PFINT_DYN_CTLN(msix_intr -
9611                                                    I40E_RX_VEC_START),
9612                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
9613                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
9614                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
9615                                (interval <<
9616                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
9617
9618         I40E_WRITE_FLUSH(hw);
9619         rte_intr_enable(&dev->pci_dev->intr_handle);
9620
9621         return 0;
9622 }
9623
9624 static int
9625 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
9626 {
9627         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
9628         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9629         uint16_t msix_intr;
9630
9631         msix_intr = intr_handle->intr_vec[queue_id];
9632         if (msix_intr == I40E_MISC_VEC_ID)
9633                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
9634         else
9635                 I40E_WRITE_REG(hw,
9636                                I40E_PFINT_DYN_CTLN(msix_intr -
9637                                                    I40E_RX_VEC_START),
9638                                0);
9639         I40E_WRITE_FLUSH(hw);
9640
9641         return 0;
9642 }
9643
9644 static int i40e_get_regs(struct rte_eth_dev *dev,
9645                          struct rte_dev_reg_info *regs)
9646 {
9647         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9648         uint32_t *ptr_data = regs->data;
9649         uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
9650         const struct i40e_reg_info *reg_info;
9651
9652         if (ptr_data == NULL) {
9653                 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
9654                 regs->width = sizeof(uint32_t);
9655                 return 0;
9656         }
9657
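             /*
              * regs->data is indexed by 32-bit words: a register at byte
              * offset X is stored at ptr_data[X >> 2].
              */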
9658         /* The first few registers have to be read using AQ operations */
9659         reg_idx = 0;
9660         while (i40e_regs_adminq[reg_idx].name) {
9661                 reg_info = &i40e_regs_adminq[reg_idx++];
9662                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
9663                         for (arr_idx2 = 0;
9664                                         arr_idx2 <= reg_info->count2;
9665                                         arr_idx2++) {
9666                                 reg_offset = arr_idx * reg_info->stride1 +
9667                                         arr_idx2 * reg_info->stride2;
9668                                 reg_offset += reg_info->base_addr;
9669                                 ptr_data[reg_offset >> 2] =
9670                                         i40e_read_rx_ctl(hw, reg_offset);
9671                         }
9672         }
9673
9674         /* The remaining registers can be read using primitives */
9675         reg_idx = 0;
9676         while (i40e_regs_others[reg_idx].name) {
9677                 reg_info = &i40e_regs_others[reg_idx++];
9678                 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
9679                         for (arr_idx2 = 0;
9680                                         arr_idx2 <= reg_info->count2;
9681                                         arr_idx2++) {
9682                                 reg_offset = arr_idx * reg_info->stride1 +
9683                                         arr_idx2 * reg_info->stride2;
9684                                 reg_offset += reg_info->base_addr;
9685                                 ptr_data[reg_offset >> 2] =
9686                                         I40E_READ_REG(hw, reg_offset);
9687                         }
9688         }
9689
9690         return 0;
9691 }
9692
9693 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
9694 {
9695         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9696
9697         /* Convert word count to byte count */
9698         return hw->nvm.sr_size << 1;
9699 }
9700
9701 static int i40e_get_eeprom(struct rte_eth_dev *dev,
9702                            struct rte_dev_eeprom_info *eeprom)
9703 {
9704         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9705         uint16_t *data = eeprom->data;
9706         uint16_t offset, length, cnt_words;
9707         int ret_code;
9708
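             /*
              * The ethdev EEPROM API is byte-based while NVM reads are
              * 16-bit-word-based, so the shifts below halve the requested
              * offset and length (odd values are truncated).
              */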
9709         offset = eeprom->offset >> 1;
9710         length = eeprom->length >> 1;
9711         cnt_words = length;
9712
9713         if (offset > hw->nvm.sr_size ||
9714                 offset + length > hw->nvm.sr_size) {
9715                 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
9716                 return -EINVAL;
9717         }
9718
9719         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
9720
9721         ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
9722         if (ret_code != I40E_SUCCESS || cnt_words != length) {
9723                 PMD_DRV_LOG(ERR, "EEPROM read failed.");
9724                 return -EIO;
9725         }
9726
9727         return 0;
9728 }
9729
9730 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
9731                                       struct ether_addr *mac_addr)
9732 {
9733         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9734
9735         if (!is_valid_assigned_ether_addr(mac_addr)) {
9736                 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
9737                 return;
9738         }
9739
9740         /* Flags: 0x3 updates port address */
9741         i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
9742 }
9743
9744 static int
9745 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
9746 {
9747         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9748         struct rte_eth_dev_data *dev_data = pf->dev_data;
9749         uint32_t frame_size = mtu + ETHER_HDR_LEN
9750                               + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
9751         int ret = 0;
9752
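             /*
              * Example (assuming I40E_VLAN_TAG_SIZE is 4): a 1500-byte MTU
              * gives a max frame of 1500 + 14 (Ethernet header) + 4 (CRC)
              * + 4 (VLAN tag) = 1522 bytes.
              */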
9753         /* check if mtu is within the allowed range */
9754         if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
9755                 return -EINVAL;
9756
9757         /* MTU cannot be changed while the port is running */
9758         if (dev_data->dev_started) {
9759                 PMD_DRV_LOG(ERR,
9760                             "port %d must be stopped before configuration",
9761                             dev_data->port_id);
9762                 return -EBUSY;
9763         }
9764
9765         if (frame_size > ETHER_MAX_LEN)
9766                 dev_data->dev_conf.rxmode.jumbo_frame = 1;
9767         else
9768                 dev_data->dev_conf.rxmode.jumbo_frame = 0;
9769
9770         dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
9771
9772         return ret;
9773 }