i40e: enable extended tag
[dpdk.git] / drivers / net / i40e / i40e_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"

/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Flow control default timer */
#define I40E_DEFAULT_PAUSE_TIME 0xFFFFU

/* Flow control default high water */
#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)

/* Flow control default low water */
#define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)
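/*
 * Worked example: 0x1C40 = 7232 bytes and 7232 / 1024 = 7, so the default
 * high water mark is programmed as 7 in kilobyte units; likewise
 * 0x1A40 / 1024 = 6 for the default low water mark.
 */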

/* Flow control enable fwd bit */
#define I40E_PRTMAC_FWD_CTRL   0x00000001

/* Receive Packet Buffer size */
#define I40E_RXPBSIZE (968 * 1024)

/* Kilobytes shift */
#define I40E_KILOSHIFT 10

/* Receive Average Packet Size in Bytes */
#define I40E_PACKET_AVERAGE_SIZE 128

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
                I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_GRST_MASK | \
                I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
                I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
                I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
                I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
                I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
                I40E_PFINT_ICR0_ENA_VFLR_MASK | \
                I40E_PFINT_ICR0_ENA_ADMINQ_MASK)

#define I40E_FLOW_TYPES ( \
        (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
        (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

/* Additional timesync values. */
#define I40E_PTP_40GB_INCVAL     0x0199999999ULL
#define I40E_PTP_10GB_INCVAL     0x0333333333ULL
#define I40E_PTP_1GB_INCVAL      0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA     0x80000000
#define I40E_PRTTSYN_TSYNTYPE    0x0e000000
#define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define I40E_MAX_PERCENT            100
#define I40E_DEFAULT_DCB_APP_NUM    1
#define I40E_DEFAULT_DCB_APP_PRIO   3

#define I40E_PRTQF_FD_INSET(_i, _j)  (0x00250000 + ((_i) * 64 + (_j) * 32))
#define I40E_GLQF_FD_MSK(_i, _j)     (0x00267200 + ((_i) * 4 + (_j) * 8))
#define I40E_GLQF_FD_MSK_FIELD       0x0000FFFF
#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8))
#define I40E_GLQF_HASH_MSK(_i, _j)   (0x00267A00 + ((_i) * 4 + (_j) * 8))
#define I40E_GLQF_HASH_MSK_FIELD     0x0000FFFF

#define I40E_INSET_NONE            0x0000000000000000ULL

/* bit 0 ~ bit 7 */
#define I40E_INSET_DMAC            0x0000000000000001ULL
#define I40E_INSET_SMAC            0x0000000000000002ULL
#define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
#define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
#define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL

/* bit 8 ~ bit 15 */
#define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
#define I40E_INSET_IPV4_DST        0x0000000000000200ULL
#define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
#define I40E_INSET_IPV6_DST        0x0000000000000800ULL
#define I40E_INSET_SRC_PORT        0x0000000000001000ULL
#define I40E_INSET_DST_PORT        0x0000000000002000ULL
#define I40E_INSET_SCTP_VT         0x0000000000004000ULL

/* bit 16 ~ bit 31 */
#define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
#define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
#define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
#define I40E_INSET_IPV6_TC         0x0000000000080000ULL
#define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
#define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
#define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
#define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL

/* bit 32 ~ bit 47, tunnel fields */
#define I40E_INSET_TUNNEL_IPV4_DST       0x0000000100000000ULL
#define I40E_INSET_TUNNEL_IPV6_DST       0x0000000200000000ULL
#define I40E_INSET_TUNNEL_DMAC           0x0000000400000000ULL
#define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
#define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
#define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL

/* bit 48 ~ bit 55 */
#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL

/* bit 56 ~ bit 63, Flex Payload */
#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
#define I40E_INSET_FLEX_PAYLOAD \
        (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
        I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
        I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
        I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)

/**
 * Below are values for writing un-exposed registers suggested
 * by silicon experts
 */
/* Destination MAC address */
#define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
/* VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_OUTER_VLAN             0x0080000000000000ULL
/* VLAN tag in the inner L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN             0x0100000000000000ULL
/* Source IPv4 address */
#define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
/* IPv4 Protocol */
#define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
/* Source IPv6 address */
#define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
/* Destination IPv6 address */
#define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
/* IPv6 Traffic Class (TC) */
#define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
/* IPv6 Next Header */
#define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
/* Source L4 port */
#define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
/* Destination L4 port */
#define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
/* SCTP verification tag */
#define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
/* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
#define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
/* Source port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
/* Destination port of tunneling UDP */
#define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
/* UDP Tunneling ID, NVGRE/GRE key */
#define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
/* Last ether type */
#define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
/* Tunneling outer destination IPv4 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
/* Tunneling outer destination IPv6 address */
#define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
/* 1st word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
/* 2nd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
/* 3rd word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
/* 4th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
/* 5th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
/* 6th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
/* 7th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
/* 8th word of flex payload */
#define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL

#define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL

#define I40E_TRANSLATE_INSET 0
#define I40E_TRANSLATE_REG   1

#define I40E_INSET_IPV4_TOS_MASK      0x0009FF00UL
#define I40E_INSET_IPV4_PROTO_MASK    0x000DFF00UL
#define I40E_INSET_IPV6_TC_MASK       0x0009F00FUL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL

/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG            0xA4
/* PCI offset for enabling/disabling Extended Tag */
#define PCI_DEV_CTRL_REG           0xA8
/* Bit mask of Extended Tag capability */
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
/* Bit shift of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

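/*
 * A minimal sketch of how the Extended Tag defines above can be used,
 * assuming the rte_eal_pci_read_config()/rte_eal_pci_write_config() EAL
 * helpers; the function below is hypothetical and illustrative only.
 * It reads the capability register, and if Extended Tag is supported
 * but not yet enabled, sets the enable bit in the control register.
 */
static void __rte_unused
i40e_enable_extended_tag_sketch(struct rte_eth_dev *dev)
{
        uint32_t buf = 0;

        /* Query the device capability register for Extended Tag support */
        if (rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
                                    PCI_DEV_CAP_REG) < 0 ||
            !(buf & PCI_DEV_CAP_EXT_TAG_MASK))
                return;

        /* Read the control register and enable Extended Tag if needed */
        buf = 0;
        if (rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
                                    PCI_DEV_CTRL_REG) < 0)
                return;
        if (!(buf & PCI_DEV_CTRL_EXT_TAG_MASK)) {
                buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
                (void)rte_eal_pci_write_config(dev->pci_dev, &buf,
                                               sizeof(buf), PCI_DEV_CTRL_REG);
        }
}
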
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
                               struct rte_eth_xstats *xstats, unsigned n);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                            uint16_t queue_id,
                                            uint8_t stat_idx,
                                            uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
                                uint16_t vlan_id,
                                int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
                                      uint16_t queue,
                                      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
                              struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                                       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
                          struct ether_addr *mac_addr,
                          uint32_t index,
                          uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_reta_entry64 *reta_conf,
                                   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
static int i40e_dcb_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
                bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
                               uint32_t hireg,
                               uint32_t loreg,
                               bool offset_loaded,
                               uint64_t *offset,
                               uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
static void i40e_dev_interrupt_handler(
                __rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
                                uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
                        uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
                        uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
                                                struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
                                             struct i40e_macvlan_filter *mv_f,
                                             int num,
                                             uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
                                struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
                                enum rte_filter_type filter_type,
                                enum rte_filter_op filter_op,
                                void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
                                  struct rte_eth_dcb_info *dcb_info);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp,
                                           uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                           struct timespec *timestamp);
static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);

static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);

static int i40e_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
static int i40e_timesync_write_time(struct rte_eth_dev *dev,
                                    const struct timespec *timestamp);

static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                         uint16_t queue_id);
static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
                                          uint16_t queue_id);

static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
        .dev_configure                = i40e_dev_configure,
        .dev_start                    = i40e_dev_start,
        .dev_stop                     = i40e_dev_stop,
        .dev_close                    = i40e_dev_close,
        .promiscuous_enable           = i40e_dev_promiscuous_enable,
        .promiscuous_disable          = i40e_dev_promiscuous_disable,
        .allmulticast_enable          = i40e_dev_allmulticast_enable,
        .allmulticast_disable         = i40e_dev_allmulticast_disable,
        .dev_set_link_up              = i40e_dev_set_link_up,
        .dev_set_link_down            = i40e_dev_set_link_down,
        .link_update                  = i40e_dev_link_update,
        .stats_get                    = i40e_dev_stats_get,
        .xstats_get                   = i40e_dev_xstats_get,
        .stats_reset                  = i40e_dev_stats_reset,
        .xstats_reset                 = i40e_dev_stats_reset,
        .queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
        .dev_infos_get                = i40e_dev_info_get,
        .vlan_filter_set              = i40e_vlan_filter_set,
        .vlan_tpid_set                = i40e_vlan_tpid_set,
        .vlan_offload_set             = i40e_vlan_offload_set,
        .vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
        .vlan_pvid_set                = i40e_vlan_pvid_set,
        .rx_queue_start               = i40e_dev_rx_queue_start,
        .rx_queue_stop                = i40e_dev_rx_queue_stop,
        .tx_queue_start               = i40e_dev_tx_queue_start,
        .tx_queue_stop                = i40e_dev_tx_queue_stop,
        .rx_queue_setup               = i40e_dev_rx_queue_setup,
        .rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
        .rx_queue_release             = i40e_dev_rx_queue_release,
        .rx_queue_count               = i40e_dev_rx_queue_count,
        .rx_descriptor_done           = i40e_dev_rx_descriptor_done,
        .tx_queue_setup               = i40e_dev_tx_queue_setup,
        .tx_queue_release             = i40e_dev_tx_queue_release,
        .dev_led_on                   = i40e_dev_led_on,
        .dev_led_off                  = i40e_dev_led_off,
        .flow_ctrl_get                = i40e_flow_ctrl_get,
        .flow_ctrl_set                = i40e_flow_ctrl_set,
        .priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
        .mac_addr_add                 = i40e_macaddr_add,
        .mac_addr_remove              = i40e_macaddr_remove,
        .reta_update                  = i40e_dev_rss_reta_update,
        .reta_query                   = i40e_dev_rss_reta_query,
        .rss_hash_update              = i40e_dev_rss_hash_update,
        .rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
        .udp_tunnel_add               = i40e_dev_udp_tunnel_add,
        .udp_tunnel_del               = i40e_dev_udp_tunnel_del,
        .filter_ctrl                  = i40e_dev_filter_ctrl,
        .rxq_info_get                 = i40e_rxq_info_get,
        .txq_info_get                 = i40e_txq_info_get,
        .mirror_rule_set              = i40e_mirror_rule_set,
        .mirror_rule_reset            = i40e_mirror_rule_reset,
        .timesync_enable              = i40e_timesync_enable,
        .timesync_disable             = i40e_timesync_disable,
        .timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
        .get_dcb_info                 = i40e_dev_get_dcb_info,
        .timesync_adjust_time         = i40e_timesync_adjust_time,
        .timesync_read_time           = i40e_timesync_read_time,
        .timesync_write_time          = i40e_timesync_write_time,
};

/* Store statistics names and their offsets in the stats structure */
struct rte_i40e_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
        {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
        {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
        {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
        {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
        {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
        {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
        {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
};

#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
                sizeof(rte_i40e_stats_strings[0]))
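
/*
 * A minimal sketch (hypothetical helper, not used elsewhere in this file)
 * of how the name/offset table above is consumed: every field of
 * struct i40e_eth_stats is a uint64_t, so an xstat value can be fetched
 * by adding the recorded byte offset to the base of the stats struct.
 */
static inline uint64_t __rte_unused
i40e_eth_xstat_value_sketch(const struct i40e_eth_stats *stats,
                            unsigned int idx)
{
        return *(const uint64_t *)(((const char *)stats) +
                                   rte_i40e_stats_strings[idx].offset);
}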

static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
        {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
                tx_dropped_link_down)},
        {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
        {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
                illegal_bytes)},
        {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
        {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
                mac_local_faults)},
        {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
                mac_remote_faults)},
        {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
                rx_length_errors)},
        {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
        {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
        {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
        {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
        {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
        {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_127)},
        {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_255)},
        {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_511)},
        {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1023)},
        {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_1522)},
        {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                rx_size_big)},
        {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
                rx_undersize)},
        {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
                rx_oversize)},
        {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
                mac_short_packet_dropped)},
        {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
                rx_fragments)},
        {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
        {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
        {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_127)},
        {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_255)},
        {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_511)},
        {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1023)},
        {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_1522)},
        {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
                tx_size_big)},
        {"rx_flow_director_atr_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_atr_match)},
        {"rx_flow_director_sb_match_packets",
                offsetof(struct i40e_hw_port_stats, fd_sb_match)},
        {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                tx_lpi_status)},
        {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
                rx_lpi_status)},
        {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                tx_lpi_count)},
        {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
                rx_lpi_count)},
};

#define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
                sizeof(rte_i40e_hw_port_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_rx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_rx)},
};

#define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
                sizeof(rte_i40e_rxq_prio_strings[0]))

static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
        {"xon_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_tx)},
        {"xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xoff_tx)},
        {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
                priority_xon_2_xoff)},
};

#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
                sizeof(rte_i40e_txq_prio_strings[0]))

static struct eth_driver rte_i40e_pmd = {
        .pci_drv = {
                .name = "rte_i40e_pmd",
                .id_table = pci_id_i40e_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_i40e_dev_init,
        .eth_dev_uninit = eth_i40e_dev_uninit,
        .dev_private_size = sizeof(struct i40e_adapter),
};

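/*
 * The two helpers below copy struct rte_eth_link atomically: the struct
 * is sized and aligned to fit in 64 bits, so a single
 * rte_atomic64_cmpset() yields a consistent snapshot of link speed,
 * duplex and status without taking a lock.
 */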
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                     struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                      struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the poll-mode driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
                  const char *params __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        rte_eth_driver_register(&rte_i40e_pmd);

        return 0;
}

static struct rte_driver rte_i40e_driver = {
        .type = PMD_PDEV,
        .init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);

/*
 * Initialize registers for flexible payload, which should be set by NVM.
 * This should be removed from code once it is fixed in NVM.
 */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif

static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
{
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);

        /* GLQF_PIT Registers */
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
}

#define I40E_FLOW_CONTROL_ETHERTYPE  0x8808

/*
 * Add an ethertype filter to drop all flow control frames transmitted
 * from VSIs.
 */
static void
i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
                        I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
        int ret;

        ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
                                I40E_FLOW_CONTROL_ETHERTYPE, flags,
                                pf->main_vsi_seid, 0,
                                TRUE, NULL, NULL);
        if (ret)
                PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control "
                                  "frames from VSIs.");
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vsi *vsi;
        int ret;
        uint32_t len;
        uint8_t aq_fail = 0;

        PMD_INIT_FUNC_TRACE();

        dev->dev_ops = &i40e_eth_dev_ops;
        dev->rx_pkt_burst = i40e_recv_pkts;
        dev->tx_pkt_burst = i40e_xmit_pkts;

        /* For secondary processes, we don't initialise any further as the
         * primary has already done this work. Only check that we don't need
         * a different RX function. */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                i40e_set_rx_function(dev);
                i40e_set_tx_function(dev);
                return 0;
        }
        pci_dev = dev->pci_dev;

        rte_eth_copy_pci_info(dev, pci_dev);

        pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;

        hw->back = I40E_PF_TO_ADAPTER(pf);
        hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
        if (!hw->hw_addr) {
                PMD_INIT_LOG(ERR, "Hardware is not available, "
                             "as address is NULL");
                return -ENODEV;
        }

        hw->vendor_id = pci_dev->id.vendor_id;
        hw->device_id = pci_dev->id.device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->bus.device = pci_dev->addr.devid;
        hw->bus.func = pci_dev->addr.function;
        hw->adapter_stopped = 0;

        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);

        /* Initialize the hardware */
        i40e_hw_init(dev);

        /* Reset here to make sure all is clean for each PF */
        ret = i40e_pf_reset(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
                return ret;
        }

        /* Initialize the shared code (base driver) */
        ret = i40e_init_shared_code(hw);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
                return ret;
        }

        /*
         * To work around the NVM issue, initialize registers
         * for flexible payload by software.
         * It should be removed once issues are fixed in NVM.
         */
        i40e_flex_payload_reg_init(hw);

        /* Initialize the parameters for adminq */
        i40e_init_adminq_parameter(hw);
        ret = i40e_init_adminq(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
                return -EIO;
        }
        PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
                     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
                     hw->aq.api_maj_ver, hw->aq.api_min_ver,
                     ((hw->nvm.version >> 12) & 0xf),
                     ((hw->nvm.version >> 4) & 0xff),
                     (hw->nvm.version & 0xf), hw->nvm.eetrack);

        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);

        /*
         * On X710, performance numbers are far from expectation with recent
         * firmware versions, and the fix for this issue may not be
         * integrated in the following firmware version. So a workaround in
         * the software driver is needed: it modifies the initial values of
         * 3 internal-only registers. Note that the workaround can be removed
         * when the issue is fixed in firmware in the future.
         */
        i40e_configure_registers(hw);

        /* Get hw capabilities */
        ret = i40e_get_cap(hw);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
                goto err_get_capabilities;
        }

        /* Initialize parameters for PF */
        ret = i40e_pf_parameter_init(dev);
        if (ret != 0) {
                PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
                goto err_parameter_init;
        }

        /* Initialize the queue management */
        ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to init queue pool");
                goto err_qp_pool_init;
        }
        ret = i40e_res_pool_init(&pf->msix_pool, 1,
                                hw->func_caps.num_msix_vectors - 1);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
                goto err_msix_pool_init;
        }

        /* Initialize lan hmc */
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp, 0, 0);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
                goto err_init_lan_hmc;
        }

        /* Configure lan hmc */
        ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
                goto err_configure_lan_hmc;
        }

        /* Get and check the mac address */
        i40e_get_mac_addr(hw, hw->mac.addr);
        if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "mac address is not valid");
                ret = -EIO;
                goto err_get_mac_addr;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.addr,
                        (struct ether_addr *) hw->mac.perm_addr);

        /* Disable flow control */
        hw->fc.requested_mode = I40E_FC_NONE;
        i40e_set_fc(hw, &aq_fail, TRUE);

        /* PF setup, which includes VSI setup */
        ret = i40e_pf_setup(pf);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
                goto err_setup_pf_switch;
        }

        vsi = pf->main_vsi;

        /* Disable double vlan by default */
        i40e_vsi_config_double_vlan(vsi, FALSE);

        if (!vsi->max_macaddrs)
                len = ETHER_ADDR_LEN;
        else
                len = ETHER_ADDR_LEN * vsi->max_macaddrs;

        /* Should be after VSI initialized */
        dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
        if (!dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory "
                                        "for storing MAC address");
                goto err_mac_alloc;
        }
        ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
                                        &dev->data->mac_addrs[0]);

        /* initialize pf host driver to set up SR-IOV resources if applicable */
        i40e_pf_host_init(dev);

        /* register callback func to eal lib */
        rte_intr_callback_register(&(pci_dev->intr_handle),
                i40e_dev_interrupt_handler, (void *)dev);

        /* configure and enable device interrupt */
        i40e_pf_config_irq0(hw, TRUE);
        i40e_pf_enable_irq0(hw);

        /* enable uio intr after callback register */
        rte_intr_enable(&(pci_dev->intr_handle));
        /*
         * Add an ethertype filter to drop all flow control frames transmitted
         * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
         * frames to wire.
         */
        i40e_add_tx_flow_control_drop_filter(pf);

        /* Set the max frame size to 0x2600 by default,
         * in case other drivers changed the default value.
         */
        i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);

        /* initialize mirror rule list */
        TAILQ_INIT(&pf->mirror_list);

        /* Init dcb to sw mode by default */
        ret = i40e_dcb_init_configure(dev, TRUE);
        if (ret != I40E_SUCCESS) {
                PMD_INIT_LOG(INFO, "Failed to init dcb.");
                pf->flags &= ~I40E_FLAG_DCB;
        }

        return 0;

err_mac_alloc:
        i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
err_get_mac_addr:
err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
        i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
        i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
        (void)i40e_shutdown_adminq(hw);

        return ret;
}

static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct i40e_hw *hw;
        struct i40e_filter_control_settings settings;
        int ret;
        uint8_t aq_fail = 0;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = dev->pci_dev;

        if (hw->adapter_stopped == 0)
                i40e_dev_close(dev);

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        /* Disable LLDP */
        ret = i40e_aq_stop_lldp(hw, true, NULL);
        if (ret != I40E_SUCCESS) /* Its failure can be ignored */
                PMD_INIT_LOG(INFO, "Failed to stop lldp");

        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);

        /* Unconfigure filter control */
        memset(&settings, 0, sizeof(settings));
        ret = i40e_set_filter_control(hw, &settings);
        if (ret)
                PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
                                        ret);

        /* Disable flow control */
        hw->fc.requested_mode = I40E_FC_NONE;
        i40e_set_fc(hw, &aq_fail, TRUE);

        /* uninitialize pf host driver */
        i40e_pf_host_uninit(dev);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        /* disable uio intr before callback unregister */
        rte_intr_disable(&(pci_dev->intr_handle));

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(&(pci_dev->intr_handle),
                i40e_dev_interrupt_handler, (void *)dev);

        return 0;
}

static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        int i, ret;

        /* Initialize to TRUE. If any Rx queue doesn't meet the bulk
         * allocation or vector Rx preconditions, we will reset it.
         */
        ad->rx_bulk_alloc_allowed = true;
        ad->rx_vec_allowed = true;
        ad->tx_simple_allowed = true;
        ad->tx_vec_allowed = true;

        if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
                ret = i40e_fdir_setup(pf);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to setup flow director.");
                        return -ENOTSUP;
                }
                ret = i40e_fdir_configure(dev);
                if (ret < 0) {
                        PMD_DRV_LOG(ERR, "failed to configure fdir.");
                        goto err;
                }
        } else
                i40e_fdir_teardown(pf);

        ret = i40e_dev_init_vlan(dev);
        if (ret < 0)
                goto err;

        /* VMDQ setup.
         *  VMDQ setup needs to be moved out of i40e_pf_config_mq_rx(), as
         *  VMDQ and RSS settings have different requirements.
         *  The general PMD call sequence is NIC init, configure,
         *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
         *  the VSI that a specific queue belongs to when VMDQ is
         *  applicable, so VMDQ setup has to be done before
         *  rx/tx_queue_setup(); this function is a good place for
         *  vmdq_setup.
         *  RSS setup needs the actual number of configured RX queues, which
         *  is only available after rx_queue_setup(), so dev_start() is a
         *  good place for the RSS setup.
         */
        if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
                ret = i40e_vmdq_setup(dev);
                if (ret)
                        goto err;
        }

        if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
                ret = i40e_dcb_setup(dev);
                if (ret) {
                        PMD_DRV_LOG(ERR, "failed to configure DCB.");
                        goto err_dcb;
                }
        }

        return 0;

err_dcb:
        /* need to release vmdq resources if they exist */
        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                i40e_vsi_release(pf->vmdq[i].vsi);
                pf->vmdq[i].vsi = NULL;
        }
        rte_free(pf->vmdq);
        pf->vmdq = NULL;
err:
        /* need to release fdir resources if they exist */
        i40e_fdir_teardown(pf);
        return ret;
}

void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        uint16_t msix_vect = vsi->msix_intr;
        uint16_t i;

        for (i = 0; i < vsi->nb_qps; i++) {
                I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
                rte_wmb();
        }

        if (vsi->type != I40E_VSI_SRIOV) {
                if (!rte_intr_allow_others(intr_handle)) {
                        I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
                                       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
                        I40E_WRITE_REG(hw,
                                       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
                                       0);
                } else {
                        I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
                                       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
                        I40E_WRITE_REG(hw,
                                       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                                       msix_vect - 1), 0);
                }
        } else {
                uint32_t reg;
                reg = (hw->func_caps.num_msix_vectors_vf - 1) *
                        vsi->user_param + (msix_vect - 1);

                I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
                               I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
        }
        I40E_WRITE_FLUSH(hw);
}

static void
__vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
                       int base_queue, int nb_queue)
{
        int i;
        uint32_t val;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

        /* Bind all RX queues to allocated MSIX interrupt */
        for (i = 0; i < nb_queue; i++) {
                val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
                        I40E_QINT_RQCTL_ITR_INDX_MASK |
                        ((base_queue + i + 1) <<
                         I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                        (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                        I40E_QINT_RQCTL_CAUSE_ENA_MASK;

                if (i == nb_queue - 1)
                        val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
        }

        /* Write first RX queue to Link list register as the head element */
        if (vsi->type != I40E_VSI_SRIOV) {
                uint16_t interval =
                        i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

                if (msix_vect == I40E_MISC_VEC_ID) {
                        I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
                                       (base_queue <<
                                        I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
                                       (0x0 <<
                                        I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
                        I40E_WRITE_REG(hw,
                                       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
                                       interval);
                } else {
                        I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
                                       (base_queue <<
                                        I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
                                       (0x0 <<
                                        I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
                        I40E_WRITE_REG(hw,
                                       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
                                                       msix_vect - 1),
                                       interval);
                }
        } else {
                uint32_t reg;

                if (msix_vect == I40E_MISC_VEC_ID) {
                        I40E_WRITE_REG(hw,
                                       I40E_VPINT_LNKLST0(vsi->user_param),
                                       (base_queue <<
                                        I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
                                       (0x0 <<
                                        I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
                } else {
                        /* num_msix_vectors_vf needs to exclude irq0 */
1203                         reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1204                                 vsi->user_param + (msix_vect - 1);
1205
1206                         I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1207                                        (base_queue <<
1208                                         I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1209                                        (0x0 <<
1210                                         I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1211                 }
1212         }
1213
1214         I40E_WRITE_FLUSH(hw);
1215 }
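
/*
 * Illustrative sketch, not part of the driver: how the loop above chains
 * RX queues into the hardware interrupt linked list. For a hypothetical
 * VSI with base_queue = 4, nb_queue = 2 and msix_vect = 3, the two
 * register writes would compose roughly these values:
 *
 *   // queue 4 points at queue 5 as the next list element
 *   val = (3 << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
 *         I40E_QINT_RQCTL_ITR_INDX_MASK |
 *         (5 << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 *         I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 *
 *   // queue 5 is the tail: its NEXTQ_INDX field is forced to the
 *   // end-of-list marker by OR-ing in the full NEXTQ_INDX mask
 *   val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
 */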
1216
1217 void
1218 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
1219 {
1220         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1221         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1222         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1223         uint16_t msix_vect = vsi->msix_intr;
1224         uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1225         uint16_t queue_idx = 0;
1226         int record = 0;
1227         uint32_t val;
1228         int i;
1229
1230         for (i = 0; i < vsi->nb_qps; i++) {
1231                 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1232                 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1233         }
1234
1235         /* Disable HW auto-masking so the INTENA flag is not auto-cleared */
1236         val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1237         val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1238                 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1239                 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1240         I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1241
1242         /* VF bind interrupt */
1243         if (vsi->type == I40E_VSI_SRIOV) {
1244                 __vsi_queues_bind_intr(vsi, msix_vect,
1245                                        vsi->base_queue, vsi->nb_qps);
1246                 return;
1247         }
1248
1249         /* PF & VMDq bind interrupt */
1250         if (rte_intr_dp_is_en(intr_handle)) {
1251                 if (vsi->type == I40E_VSI_MAIN) {
1252                         queue_idx = 0;
1253                         record = 1;
1254                 } else if (vsi->type == I40E_VSI_VMDQ2) {
1255                         struct i40e_vsi *main_vsi =
1256                                 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1257                         queue_idx = vsi->base_queue - main_vsi->nb_qps;
1258                         record = 1;
1259                 }
1260         }
1261
1262         for (i = 0; i < vsi->nb_used_qps; i++) {
1263                 if (nb_msix <= 1) {
1264                         if (!rte_intr_allow_others(intr_handle))
1265                                 /* allow sharing MISC_VEC_ID */
1266                                 msix_vect = I40E_MISC_VEC_ID;
1267
1268                         /* not enough MSI-X vectors, map remaining queues to one */
1269                         __vsi_queues_bind_intr(vsi, msix_vect,
1270                                                vsi->base_queue + i,
1271                                                vsi->nb_used_qps - i);
1272                         for (; record && i < vsi->nb_used_qps; i++)
1273                                 intr_handle->intr_vec[queue_idx + i] =
1274                                         msix_vect;
1275                         break;
1276                 }
1277                 /* 1:1 queue/msix_vect mapping */
1278                 __vsi_queues_bind_intr(vsi, msix_vect,
1279                                        vsi->base_queue + i, 1);
1280                 if (record)
1281                         intr_handle->intr_vec[queue_idx + i] = msix_vect;
1282
1283                 msix_vect++;
1284                 nb_msix--;
1285         }
1286 }
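
/*
 * Illustrative mapping produced by the loop above, with hypothetical
 * numbers: for nb_used_qps = 4 and nb_msix = 2, the first queue gets a
 * 1:1 binding and the remaining queues share the last available vector:
 *
 *   queue 0 -> msix_vect        (1:1 binding)
 *   queue 1 -> msix_vect + 1    (shared: nb_msix dropped to 1)
 *   queue 2 -> msix_vect + 1
 *   queue 3 -> msix_vect + 1
 */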
1287
1288 static void
1289 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1290 {
1291         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1292         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1293         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1294         uint16_t interval =
1295                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1296         uint16_t msix_intr, i;
1297
1298         if (rte_intr_allow_others(intr_handle))
1299                 for (i = 0; i < vsi->nb_msix; i++) {
1300                         msix_intr = vsi->msix_intr + i;
1301                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1302                                 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1303                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1304                                 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1305                                 (interval <<
1306                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1307                 }
1308         else
1309                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1310                                I40E_PFINT_DYN_CTL0_INTENA_MASK |
1311                                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1312                                (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1313                                (interval <<
1314                                 I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1315
1316         I40E_WRITE_FLUSH(hw);
1317 }
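
/*
 * Illustrative breakdown, not part of the driver, of the DYN_CTLN value
 * written above: INTENA re-enables the vector, CLEARPBA clears its
 * pending-bit-array entry, ITR index 0 selects the first throttling
 * timer, and the interval field reloads it. Assuming the 2 usec ITR
 * granularity used by i40e_calc_itr_interval(), interval = 8 would give
 * a 16 usec interrupt throttle:
 *
 *   val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
 *         I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
 *         (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
 *         (8 << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
 */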
1318
1319 static void
1320 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1321 {
1322         struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1323         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1324         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1325         uint16_t msix_intr, i;
1326
1327         if (rte_intr_allow_others(intr_handle))
1328                 for (i = 0; i < vsi->nb_msix; i++) {
1329                         msix_intr = vsi->msix_intr + i;
1330                         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1331                                        0);
1332                 }
1333         else
1334                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1335
1336         I40E_WRITE_FLUSH(hw);
1337 }
1338
1339 static inline uint8_t
1340 i40e_parse_link_speed(uint16_t eth_link_speed)
1341 {
1342         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1343
1344         switch (eth_link_speed) {
1345         case ETH_LINK_SPEED_40G:
1346                 link_speed = I40E_LINK_SPEED_40GB;
1347                 break;
1348         case ETH_LINK_SPEED_20G:
1349                 link_speed = I40E_LINK_SPEED_20GB;
1350                 break;
1351         case ETH_LINK_SPEED_10G:
1352                 link_speed = I40E_LINK_SPEED_10GB;
1353                 break;
1354         case ETH_LINK_SPEED_1000:
1355                 link_speed = I40E_LINK_SPEED_1GB;
1356                 break;
1357         case ETH_LINK_SPEED_100:
1358                 link_speed = I40E_LINK_SPEED_100MB;
1359                 break;
1360         }
1361
1362         return link_speed;
1363 }
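
/*
 * Usage sketch (illustrative): mapping an ethdev speed to the AQ link
 * speed before PHY configuration; values outside the switch fall through
 * to I40E_LINK_SPEED_UNKNOWN:
 *
 *   uint8_t speed = i40e_parse_link_speed(ETH_LINK_SPEED_10G);
 *   // speed == I40E_LINK_SPEED_10GB
 */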
1364
1365 static int
1366 i40e_phy_conf_link(__rte_unused struct i40e_hw *hw,
1367                    __rte_unused uint8_t abilities,
1368                    __rte_unused uint8_t force_speed)
1369 {
1370         /* Skip any PHY configuration on 10G and 40G interfaces, as a
1371          * workaround for the link control limitation that all link control
1372          * must be handled by firmware. Revisit this if future firmware
1373          * versions expose link control to the software driver.
1374          */
1375         return I40E_SUCCESS;
1376 }
1377
1378 static int
1379 i40e_apply_link_speed(struct rte_eth_dev *dev)
1380 {
1381         uint8_t speed;
1382         uint8_t abilities = 0;
1383         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1384         struct rte_eth_conf *conf = &dev->data->dev_conf;
1385
1386         speed = i40e_parse_link_speed(conf->link_speed);
1387         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1388         if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
1389                 abilities |= I40E_AQ_PHY_AN_ENABLED;
1390         else
1391                 abilities |= I40E_AQ_PHY_LINK_ENABLED;
1392
1393         return i40e_phy_conf_link(hw, abilities, speed);
1394 }
1395
1396 static int
1397 i40e_dev_start(struct rte_eth_dev *dev)
1398 {
1399         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1400         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1401         struct i40e_vsi *main_vsi = pf->main_vsi;
1402         int ret, i;
1403         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1404         uint32_t intr_vector = 0;
1405
1406         hw->adapter_stopped = 0;
1407
1408         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
1409                 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
1410                 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
1411                              dev->data->dev_conf.link_duplex,
1412                              dev->data->port_id);
1413                 return -EINVAL;
1414         }
1415
1416         rte_intr_disable(intr_handle);
1417
1418         if ((rte_intr_cap_multiple(intr_handle) ||
1419              !RTE_ETH_DEV_SRIOV(dev).active) &&
1420             dev->data->dev_conf.intr_conf.rxq != 0) {
1421                 intr_vector = dev->data->nb_rx_queues;
1422                 if (rte_intr_efd_enable(intr_handle, intr_vector))
1423                         return -1;
1424         }
1425
1426         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1427                 intr_handle->intr_vec =
1428                         rte_zmalloc("intr_vec",
1429                                     dev->data->nb_rx_queues * sizeof(int),
1430                                     0);
1431                 if (!intr_handle->intr_vec) {
1432                         PMD_INIT_LOG(ERR, "Failed to allocate intr_vec for"
1433                                      " %d RX queues\n", dev->data->nb_rx_queues);
1434                         return -ENOMEM;
1435                 }
1436         }
1437
1438         /* Initialize VSI */
1439         ret = i40e_dev_rxtx_init(pf);
1440         if (ret != I40E_SUCCESS) {
1441                 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1442                 goto err_up;
1443         }
1444
1445         /* Map queues with MSIX interrupt */
1446         main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1447                 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1448         i40e_vsi_queues_bind_intr(main_vsi);
1449         i40e_vsi_enable_queues_intr(main_vsi);
1450
1451         /* Map VMDQ VSI queues with MSIX interrupt */
1452         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1453                 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1454                 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
1455                 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1456         }
1457
1458         /* enable FDIR MSIX interrupt */
1459         if (pf->fdir.fdir_vsi) {
1460                 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
1461                 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1462         }
1463
1464         /* Enable all queues which have been configured */
1465         ret = i40e_dev_switch_queues(pf, TRUE);
1466         if (ret != I40E_SUCCESS) {
1467                 PMD_DRV_LOG(ERR, "Failed to enable VSI");
1468                 goto err_up;
1469         }
1470
1471         /* Enable receiving broadcast packets */
1472         ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
1473         if (ret != I40E_SUCCESS)
1474                 PMD_DRV_LOG(INFO, "Failed to enable VSI broadcast");
1475
1476         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1477                 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
1478                                                 true, NULL);
1479                 if (ret != I40E_SUCCESS)
1480                         PMD_DRV_LOG(INFO, "Failed to enable VSI broadcast");
1481         }
1482
1483         /* Apply link configure */
1484         ret = i40e_apply_link_speed(dev);
1485         if (ret != I40E_SUCCESS) {
1486                 PMD_DRV_LOG(ERR, "Failed to apply link settings");
1487                 goto err_up;
1488         }
1489
1490         if (!rte_intr_allow_others(intr_handle)) {
1491                 rte_intr_callback_unregister(intr_handle,
1492                                              i40e_dev_interrupt_handler,
1493                                              (void *)dev);
1494                 /* configure and enable device interrupt */
1495                 i40e_pf_config_irq0(hw, FALSE);
1496                 i40e_pf_enable_irq0(hw);
1497
1498                 if (dev->data->dev_conf.intr_conf.lsc != 0)
1499                         PMD_INIT_LOG(INFO, "LSC interrupt is not enabled:"
1500                                      " no interrupt multiplexing support\n");
1501         }
1502
1503         /* enable uio intr after callback register */
1504         rte_intr_enable(intr_handle);
1505
1506         return I40E_SUCCESS;
1507
1508 err_up:
1509         i40e_dev_switch_queues(pf, FALSE);
1510         i40e_dev_clear_queues(dev);
1511
1512         return ret;
1513 }
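
/*
 * Application-side sketch (illustrative, not part of the driver): the
 * RX-queue interrupt setup above only runs when the application requests
 * it before starting the port:
 *
 *   struct rte_eth_conf conf;
 *   memset(&conf, 0, sizeof(conf));
 *   conf.intr_conf.rxq = 1;  // request per-RX-queue interrupts
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *   // ... rte_eth_rx/tx_queue_setup() for each queue ...
 *   rte_eth_dev_start(port_id);  // dispatches to i40e_dev_start()
 */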
1514
1515 static void
1516 i40e_dev_stop(struct rte_eth_dev *dev)
1517 {
1518         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1519         struct i40e_vsi *main_vsi = pf->main_vsi;
1520         struct i40e_mirror_rule *p_mirror;
1521         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1522         int i;
1523
1524         /* Disable all queues */
1525         i40e_dev_switch_queues(pf, FALSE);
1526
1527         /* un-map queues with interrupt registers */
1528         i40e_vsi_disable_queues_intr(main_vsi);
1529         i40e_vsi_queues_unbind_intr(main_vsi);
1530
1531         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1532                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
1533                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
1534         }
1535
1536         if (pf->fdir.fdir_vsi) {
1537                 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
1538                 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
1539         }
1540         /* Clear all queues and release memory */
1541         i40e_dev_clear_queues(dev);
1542
1543         /* Set link down */
1544         i40e_dev_set_link_down(dev);
1545
1546         /* Remove all mirror rules */
1547         while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
1548                 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
1549                 rte_free(p_mirror);
1550         }
1551         pf->nb_mirror_rule = 0;
1552
1553         if (!rte_intr_allow_others(intr_handle))
1554                 /* resume to the default handler */
1555                 rte_intr_callback_register(intr_handle,
1556                                            i40e_dev_interrupt_handler,
1557                                            (void *)dev);
1558
1559         /* Clean datapath event and queue/vec mapping */
1560         rte_intr_efd_disable(intr_handle);
1561         if (intr_handle->intr_vec) {
1562                 rte_free(intr_handle->intr_vec);
1563                 intr_handle->intr_vec = NULL;
1564         }
1565 }
1566
1567 static void
1568 i40e_dev_close(struct rte_eth_dev *dev)
1569 {
1570         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1571         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1572         uint32_t reg;
1573         int i;
1574
1575         PMD_INIT_FUNC_TRACE();
1576
1577         i40e_dev_stop(dev);
1578         hw->adapter_stopped = 1;
1579         i40e_dev_free_queues(dev);
1580
1581         /* Disable interrupt */
1582         i40e_pf_disable_irq0(hw);
1583         rte_intr_disable(&(dev->pci_dev->intr_handle));
1584
1585         /* shutdown and destroy the HMC */
1586         i40e_shutdown_lan_hmc(hw);
1587
1588         /* release all the existing VSIs and VEBs */
1589         i40e_fdir_teardown(pf);
1590         i40e_vsi_release(pf->main_vsi);
1591
1592         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1593                 i40e_vsi_release(pf->vmdq[i].vsi);
1594                 pf->vmdq[i].vsi = NULL;
1595         }
1596
1597         rte_free(pf->vmdq);
1598         pf->vmdq = NULL;
1599
1600         /* shutdown the adminq */
1601         i40e_aq_queue_shutdown(hw, true);
1602         i40e_shutdown_adminq(hw);
1603
1604         i40e_res_pool_destroy(&pf->qp_pool);
1605         i40e_res_pool_destroy(&pf->msix_pool);
1606
1607         /* force a PF reset to clean anything leftover */
1608         reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
1609         I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
1610                         (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1611         I40E_WRITE_FLUSH(hw);
1612 }
1613
1614 static void
1615 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
1616 {
1617         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1618         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1619         struct i40e_vsi *vsi = pf->main_vsi;
1620         int status;
1621
1622         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1623                                                         true, NULL);
1624         if (status != I40E_SUCCESS)
1625                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
1626
1627         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1628                                                         TRUE, NULL);
1629         if (status != I40E_SUCCESS)
1630                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1631
1632 }
1633
1634 static void
1635 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
1636 {
1637         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1638         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1639         struct i40e_vsi *vsi = pf->main_vsi;
1640         int status;
1641
1642         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1643                                                         false, NULL);
1644         if (status != I40E_SUCCESS)
1645                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
1646
1647         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1648                                                         false, NULL);
1649         if (status != I40E_SUCCESS)
1650                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1651 }
1652
1653 static void
1654 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
1655 {
1656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1657         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1658         struct i40e_vsi *vsi = pf->main_vsi;
1659         int ret;
1660
1661         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
1662         if (ret != I40E_SUCCESS)
1663                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1664 }
1665
1666 static void
1667 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
1668 {
1669         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1670         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1671         struct i40e_vsi *vsi = pf->main_vsi;
1672         int ret;
1673
1674         if (dev->data->promiscuous == 1)
1675                 return; /* must remain in all_multicast mode */
1676
1677         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
1678                                 vsi->seid, FALSE, NULL);
1679         if (ret != I40E_SUCCESS)
1680                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1681 }
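
/*
 * Usage note (illustrative): unicast promiscuous mode implies receiving
 * all multicast, so disabling allmulticast is deliberately a no-op while
 * promiscuous mode is active:
 *
 *   rte_eth_promiscuous_enable(port_id);
 *   rte_eth_allmulticast_disable(port_id);  // ignored until promisc off
 */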
1682
1683 /*
1684  * Set device link up.
1685  */
1686 static int
1687 i40e_dev_set_link_up(struct rte_eth_dev *dev)
1688 {
1689         /* re-apply link speed setting */
1690         return i40e_apply_link_speed(dev);
1691 }
1692
1693 /*
1694  * Set device link down.
1695  */
1696 static int
1697 i40e_dev_set_link_down(struct rte_eth_dev *dev)
1698 {
1699         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
1700         uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1701         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1702
1703         return i40e_phy_conf_link(hw, abilities, speed);
1704 }
1705
1706 int
1707 i40e_dev_link_update(struct rte_eth_dev *dev,
1708                      int wait_to_complete)
1709 {
1710 #define CHECK_INTERVAL 100  /* 100ms */
1711 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
1712         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1713         struct i40e_link_status link_status;
1714         struct rte_eth_link link, old;
1715         int status;
1716         unsigned rep_cnt = MAX_REPEAT_TIME;
1717
1718         memset(&link, 0, sizeof(link));
1719         memset(&old, 0, sizeof(old));
1720         memset(&link_status, 0, sizeof(link_status));
1721         rte_i40e_dev_atomic_read_link_status(dev, &old);
1722
1723         do {
1724                 /* Get link status information from hardware */
1725                 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
1726                 if (status != I40E_SUCCESS) {
1727                         link.link_speed = ETH_LINK_SPEED_100;
1728                         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1729                         PMD_DRV_LOG(ERR, "Failed to get link info");
1730                         goto out;
1731                 }
1732
1733                 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
1734                 if (!wait_to_complete)
1735                         break;
1736
1737                 rte_delay_ms(CHECK_INTERVAL);
1738         } while (!link.link_status && rep_cnt--);
1739
1740         if (!link.link_status)
1741                 goto out;
1742
1743         /* i40e uses full duplex only */
1744         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1745
1746         /* Parse the link status */
1747         switch (link_status.link_speed) {
1748         case I40E_LINK_SPEED_100MB:
1749                 link.link_speed = ETH_LINK_SPEED_100;
1750                 break;
1751         case I40E_LINK_SPEED_1GB:
1752                 link.link_speed = ETH_LINK_SPEED_1000;
1753                 break;
1754         case I40E_LINK_SPEED_10GB:
1755                 link.link_speed = ETH_LINK_SPEED_10G;
1756                 break;
1757         case I40E_LINK_SPEED_20GB:
1758                 link.link_speed = ETH_LINK_SPEED_20G;
1759                 break;
1760         case I40E_LINK_SPEED_40GB:
1761                 link.link_speed = ETH_LINK_SPEED_40G;
1762                 break;
1763         default:
1764                 link.link_speed = ETH_LINK_SPEED_100;
1765                 break;
1766         }
1767
1768 out:
1769         rte_i40e_dev_atomic_write_link_status(dev, &link);
1770         if (link.link_status == old.link_status)
1771                 return -1;
1772
1773         return 0;
1774 }
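
/*
 * Usage sketch (illustrative): applications reach this through the
 * ethdev API; wait_to_complete selects the blocking variant, which polls
 * for up to ~1s (MAX_REPEAT_TIME * CHECK_INTERVAL) for link-up:
 *
 *   struct rte_eth_link link;
 *   rte_eth_link_get(port_id, &link);         // wait_to_complete = 1
 *   rte_eth_link_get_nowait(port_id, &link);  // single AQ query
 */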
1775
1776 /* Get all the statistics of a VSI */
1777 void
1778 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1779 {
1780         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1781         struct i40e_eth_stats *nes = &vsi->eth_stats;
1782         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1783         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
1784
1785         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1786                             vsi->offset_loaded, &oes->rx_bytes,
1787                             &nes->rx_bytes);
1788         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1789                             vsi->offset_loaded, &oes->rx_unicast,
1790                             &nes->rx_unicast);
1791         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1792                             vsi->offset_loaded, &oes->rx_multicast,
1793                             &nes->rx_multicast);
1794         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1795                             vsi->offset_loaded, &oes->rx_broadcast,
1796                             &nes->rx_broadcast);
1797         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1798                             &oes->rx_discards, &nes->rx_discards);
1799         /* GLV_REPC not supported */
1800         /* GLV_RMPC not supported */
1801         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1802                             &oes->rx_unknown_protocol,
1803                             &nes->rx_unknown_protocol);
1804         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1805                             vsi->offset_loaded, &oes->tx_bytes,
1806                             &nes->tx_bytes);
1807         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1808                             vsi->offset_loaded, &oes->tx_unicast,
1809                             &nes->tx_unicast);
1810         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1811                             vsi->offset_loaded, &oes->tx_multicast,
1812                             &nes->tx_multicast);
1813         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1814                             vsi->offset_loaded, &oes->tx_broadcast,
1815                             &nes->tx_broadcast);
1816         /* GLV_TDPC not supported */
1817         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1818                             &oes->tx_errors, &nes->tx_errors);
1819         vsi->offset_loaded = true;
1820
1821         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1822                     vsi->vsi_id);
1823         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
1824         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
1825         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
1826         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
1827         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
1828         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
1829                     nes->rx_unknown_protocol);
1830         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
1831         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
1832         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
1833         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
1834         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
1835         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
1836         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1837                     vsi->vsi_id);
1838 }
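
/*
 * Rough sketch, not the actual helper: i40e_stat_update_48() reads a
 * 48-bit counter split across hi/lo registers and reports the delta
 * since the stored offset, compensating for 48-bit wraparound:
 *
 *   new_data = ((uint64_t)(rd_hi & 0xFFFF) << 32) | rd_lo;
 *   if (!offset_loaded)
 *           *offset = new_data;
 *   if (new_data >= *offset)
 *           *stat = new_data - *offset;
 *   else
 *           *stat = new_data + ((uint64_t)1 << 48) - *offset;
 */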
1839
1840 static void
1841 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
1842 {
1843         unsigned int i;
1844         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1845         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1846
1847         /* Get statistics of struct i40e_eth_stats */
1848         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1849                             I40E_GLPRT_GORCL(hw->port),
1850                             pf->offset_loaded, &os->eth.rx_bytes,
1851                             &ns->eth.rx_bytes);
1852         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1853                             I40E_GLPRT_UPRCL(hw->port),
1854                             pf->offset_loaded, &os->eth.rx_unicast,
1855                             &ns->eth.rx_unicast);
1856         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1857                             I40E_GLPRT_MPRCL(hw->port),
1858                             pf->offset_loaded, &os->eth.rx_multicast,
1859                             &ns->eth.rx_multicast);
1860         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1861                             I40E_GLPRT_BPRCL(hw->port),
1862                             pf->offset_loaded, &os->eth.rx_broadcast,
1863                             &ns->eth.rx_broadcast);
1864         /* Workaround: CRC size should not be included in byte statistics,
1865          * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
1866          */
1867         ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
1868                 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
1869
1870         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1871                             pf->offset_loaded, &os->eth.rx_discards,
1872                             &ns->eth.rx_discards);
1873         /* GLPRT_REPC not supported */
1874         /* GLPRT_RMPC not supported */
1875         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1876                             pf->offset_loaded,
1877                             &os->eth.rx_unknown_protocol,
1878                             &ns->eth.rx_unknown_protocol);
1879         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1880                             I40E_GLPRT_GOTCL(hw->port),
1881                             pf->offset_loaded, &os->eth.tx_bytes,
1882                             &ns->eth.tx_bytes);
1883         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1884                             I40E_GLPRT_UPTCL(hw->port),
1885                             pf->offset_loaded, &os->eth.tx_unicast,
1886                             &ns->eth.tx_unicast);
1887         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1888                             I40E_GLPRT_MPTCL(hw->port),
1889                             pf->offset_loaded, &os->eth.tx_multicast,
1890                             &ns->eth.tx_multicast);
1891         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1892                             I40E_GLPRT_BPTCL(hw->port),
1893                             pf->offset_loaded, &os->eth.tx_broadcast,
1894                             &ns->eth.tx_broadcast);
1895         ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
1896                 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
1897         /* GLPRT_TEPC not supported */
1898
1899         /* additional port specific stats */
1900         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1901                             pf->offset_loaded, &os->tx_dropped_link_down,
1902                             &ns->tx_dropped_link_down);
1903         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1904                             pf->offset_loaded, &os->crc_errors,
1905                             &ns->crc_errors);
1906         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1907                             pf->offset_loaded, &os->illegal_bytes,
1908                             &ns->illegal_bytes);
1909         /* GLPRT_ERRBC not supported */
1910         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1911                             pf->offset_loaded, &os->mac_local_faults,
1912                             &ns->mac_local_faults);
1913         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1914                             pf->offset_loaded, &os->mac_remote_faults,
1915                             &ns->mac_remote_faults);
1916         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1917                             pf->offset_loaded, &os->rx_length_errors,
1918                             &ns->rx_length_errors);
1919         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1920                             pf->offset_loaded, &os->link_xon_rx,
1921                             &ns->link_xon_rx);
1922         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1923                             pf->offset_loaded, &os->link_xoff_rx,
1924                             &ns->link_xoff_rx);
1925         for (i = 0; i < 8; i++) {
1926                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1927                                     pf->offset_loaded,
1928                                     &os->priority_xon_rx[i],
1929                                     &ns->priority_xon_rx[i]);
1930                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1931                                     pf->offset_loaded,
1932                                     &os->priority_xoff_rx[i],
1933                                     &ns->priority_xoff_rx[i]);
1934         }
1935         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1936                             pf->offset_loaded, &os->link_xon_tx,
1937                             &ns->link_xon_tx);
1938         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1939                             pf->offset_loaded, &os->link_xoff_tx,
1940                             &ns->link_xoff_tx);
1941         for (i = 0; i < 8; i++) {
1942                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1943                                     pf->offset_loaded,
1944                                     &os->priority_xon_tx[i],
1945                                     &ns->priority_xon_tx[i]);
1946                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1947                                     pf->offset_loaded,
1948                                     &os->priority_xoff_tx[i],
1949                                     &ns->priority_xoff_tx[i]);
1950                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1951                                     pf->offset_loaded,
1952                                     &os->priority_xon_2_xoff[i],
1953                                     &ns->priority_xon_2_xoff[i]);
1954         }
1955         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1956                             I40E_GLPRT_PRC64L(hw->port),
1957                             pf->offset_loaded, &os->rx_size_64,
1958                             &ns->rx_size_64);
1959         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1960                             I40E_GLPRT_PRC127L(hw->port),
1961                             pf->offset_loaded, &os->rx_size_127,
1962                             &ns->rx_size_127);
1963         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1964                             I40E_GLPRT_PRC255L(hw->port),
1965                             pf->offset_loaded, &os->rx_size_255,
1966                             &ns->rx_size_255);
1967         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1968                             I40E_GLPRT_PRC511L(hw->port),
1969                             pf->offset_loaded, &os->rx_size_511,
1970                             &ns->rx_size_511);
1971         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1972                             I40E_GLPRT_PRC1023L(hw->port),
1973                             pf->offset_loaded, &os->rx_size_1023,
1974                             &ns->rx_size_1023);
1975         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1976                             I40E_GLPRT_PRC1522L(hw->port),
1977                             pf->offset_loaded, &os->rx_size_1522,
1978                             &ns->rx_size_1522);
1979         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1980                             I40E_GLPRT_PRC9522L(hw->port),
1981                             pf->offset_loaded, &os->rx_size_big,
1982                             &ns->rx_size_big);
1983         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1984                             pf->offset_loaded, &os->rx_undersize,
1985                             &ns->rx_undersize);
1986         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1987                             pf->offset_loaded, &os->rx_fragments,
1988                             &ns->rx_fragments);
1989         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1990                             pf->offset_loaded, &os->rx_oversize,
1991                             &ns->rx_oversize);
1992         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1993                             pf->offset_loaded, &os->rx_jabber,
1994                             &ns->rx_jabber);
1995         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1996                             I40E_GLPRT_PTC64L(hw->port),
1997                             pf->offset_loaded, &os->tx_size_64,
1998                             &ns->tx_size_64);
1999         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2000                             I40E_GLPRT_PTC127L(hw->port),
2001                             pf->offset_loaded, &os->tx_size_127,
2002                             &ns->tx_size_127);
2003         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2004                             I40E_GLPRT_PTC255L(hw->port),
2005                             pf->offset_loaded, &os->tx_size_255,
2006                             &ns->tx_size_255);
2007         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2008                             I40E_GLPRT_PTC511L(hw->port),
2009                             pf->offset_loaded, &os->tx_size_511,
2010                             &ns->tx_size_511);
2011         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2012                             I40E_GLPRT_PTC1023L(hw->port),
2013                             pf->offset_loaded, &os->tx_size_1023,
2014                             &ns->tx_size_1023);
2015         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2016                             I40E_GLPRT_PTC1522L(hw->port),
2017                             pf->offset_loaded, &os->tx_size_1522,
2018                             &ns->tx_size_1522);
2019         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2020                             I40E_GLPRT_PTC9522L(hw->port),
2021                             pf->offset_loaded, &os->tx_size_big,
2022                             &ns->tx_size_big);
2023         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2024                            pf->offset_loaded,
2025                            &os->fd_sb_match, &ns->fd_sb_match);
2026         /* GLPRT_MSPDC not supported */
2027         /* GLPRT_XEC not supported */
2028
2029         pf->offset_loaded = true;
2030
2031         if (pf->main_vsi)
2032                 i40e_update_vsi_stats(pf->main_vsi);
2033 }
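
/*
 * Worked example (illustrative) of the CRC workaround above: if the port
 * received 1000 unicast, 10 multicast and 2 broadcast frames, rx_bytes
 * is reduced by (1000 + 10 + 2) * ETHER_CRC_LEN = 1012 * 4 = 4048 bytes,
 * so byte counters exclude the 4-byte FCS that the hardware includes.
 */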
2034
2035 /* Get all statistics of a port */
2036 static void
2037 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2038 {
2039         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2040         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2041         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2042         unsigned i;
2043
2044         /* Read the HW registers to refresh pf->stats, then fill the ethdev struct */
2045         i40e_read_stats_registers(pf, hw);
2046
2047         stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
2048                         pf->main_vsi->eth_stats.rx_multicast +
2049                         pf->main_vsi->eth_stats.rx_broadcast -
2050                         pf->main_vsi->eth_stats.rx_discards;
2051         stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
2052                         pf->main_vsi->eth_stats.tx_multicast +
2053                         pf->main_vsi->eth_stats.tx_broadcast;
2054         stats->ibytes   = ns->eth.rx_bytes;
2055         stats->obytes   = ns->eth.tx_bytes;
2056         stats->oerrors  = ns->eth.tx_errors +
2057                         pf->main_vsi->eth_stats.tx_errors;
2058         stats->imcasts  = pf->main_vsi->eth_stats.rx_multicast;
2059
2060         /* Rx Errors */
2061         stats->imissed  = ns->eth.rx_discards +
2062                         pf->main_vsi->eth_stats.rx_discards;
2063         stats->ierrors  = ns->crc_errors +
2064                         ns->rx_length_errors + ns->rx_undersize +
2065                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber +
2066                         stats->imissed;
2067
2068         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2069         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2070         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2071         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2072         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2073         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2074         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2075                     ns->eth.rx_unknown_protocol);
2076         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2077         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2078         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2079         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2080         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2081         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2082
2083         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2084                     ns->tx_dropped_link_down);
2085         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2086         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2087                     ns->illegal_bytes);
2088         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2089         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2090                     ns->mac_local_faults);
2091         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2092                     ns->mac_remote_faults);
2093         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2094                     ns->rx_length_errors);
2095         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2096         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2097         for (i = 0; i < 8; i++) {
2098                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2099                                 i, ns->priority_xon_rx[i]);
2100                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2101                                 i, ns->priority_xoff_rx[i]);
2102         }
2103         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2104         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2105         for (i = 0; i < 8; i++) {
2106                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2107                                 i, ns->priority_xon_tx[i]);
2108                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2109                                 i, ns->priority_xoff_tx[i]);
2110                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2111                                 i, ns->priority_xon_2_xoff[i]);
2112         }
2113         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2114         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2115         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2116         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2117         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2118         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2119         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2120         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2121         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2122         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2123         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2124         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2125         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2126         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2127         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2128         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2129         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2130         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2131         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2132                         ns->mac_short_packet_dropped);
2133         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2134                     ns->checksum_error);
2135         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2136         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2137 }
2138
2139 /* Reset the statistics */
2140 static void
2141 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2142 {
2143         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2144         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2145
2146         /* Mark PF and VSI stats to update the offset, aka "reset" */
2147         pf->offset_loaded = false;
2148         if (pf->main_vsi)
2149                 pf->main_vsi->offset_loaded = false;
2150
2151         /* Read the stats so current register values become the new offsets */
2152         i40e_read_stats_registers(pf, hw);
2153 }
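
/*
 * Usage sketch (illustrative): the reset above only re-baselines the
 * offsets, so the next read reports deltas from this point:
 *
 *   rte_eth_stats_reset(port_id);
 *   // ... traffic runs ...
 *   rte_eth_stats_get(port_id, &stats);  // counts since the reset
 */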
2154
2155 static uint32_t
2156 i40e_xstats_calc_num(void)
2157 {
2158         return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2159                 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
2160                 (I40E_NB_TXQ_PRIO_XSTATS * 8);
2161 }
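
/*
 * Illustrative arithmetic with hypothetical counts: if there were 12
 * eth stats, 43 HW port stats, 1 per-priority RX stat and 3 per-priority
 * TX stats, the total would be 12 + 43 + (1 * 8) + (3 * 8) = 87 entries.
 * The real counts come from the rte_i40e_*_strings tables.
 */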
2162
2163 static int
2164 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
2165                     unsigned n)
2166 {
2167         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2168         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2169         unsigned i, count, prio;
2170         struct i40e_hw_port_stats *hw_stats = &pf->stats;
2171
2172         count = i40e_xstats_calc_num();
2173         if (n < count)
2174                 return count;
2175
2176         i40e_read_stats_registers(pf, hw);
2177
2178         if (xstats == NULL)
2179                 return 0;
2180
2181         count = 0;
2182
2183         /* Get stats from i40e_eth_stats struct */
2184         for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2185                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2186                          "%s", rte_i40e_stats_strings[i].name);
2187                 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2188                         rte_i40e_stats_strings[i].offset);
2189                 count++;
2190         }
2191
2192         /* Get individual stats from i40e_hw_port struct */
2193         for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2194                 snprintf(xstats[count].name, sizeof(xstats[count].name),
2195                          "%s", rte_i40e_hw_port_strings[i].name);
2196                 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2197                                 rte_i40e_hw_port_strings[i].offset);
2198                 count++;
2199         }
2200
2201         for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2202                 for (prio = 0; prio < 8; prio++) {
2203                         snprintf(xstats[count].name,
2204                                  sizeof(xstats[count].name),
2205                                  "rx_priority%u_%s", prio,
2206                                  rte_i40e_rxq_prio_strings[i].name);
2207                         xstats[count].value =
2208                                 *(uint64_t *)(((char *)hw_stats) +
2209                                 rte_i40e_rxq_prio_strings[i].offset +
2210                                 (sizeof(uint64_t) * prio));
2211                         count++;
2212                 }
2213         }
2214
2215         for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2216                 for (prio = 0; prio < 8; prio++) {
2217                         snprintf(xstats[count].name,
2218                                  sizeof(xstats[count].name),
2219                                  "tx_priority%u_%s", prio,
2220                                  rte_i40e_txq_prio_strings[i].name);
2221                         xstats[count].value =
2222                                 *(uint64_t *)(((char *)hw_stats) +
2223                                 rte_i40e_txq_prio_strings[i].offset +
2224                                 (sizeof(uint64_t) * prio));
2225                         count++;
2226                 }
2227         }
2228
2229         return count;
2230 }
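
/*
 * Usage sketch (illustrative): callers probe the required array size
 * first, then fetch the entries:
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);  // returns total count
 *   struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
 *   if (xs != NULL)
 *           n = rte_eth_xstats_get(port_id, xs, n);
 */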
2231
2232 static int
2233 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
2234                                  __rte_unused uint16_t queue_id,
2235                                  __rte_unused uint8_t stat_idx,
2236                                  __rte_unused uint8_t is_rx)
2237 {
2238         PMD_INIT_FUNC_TRACE();
2239
2240         return -ENOSYS;
2241 }
2242
2243 static void
2244 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2245 {
2246         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2247         struct i40e_vsi *vsi = pf->main_vsi;
2248
2249         dev_info->max_rx_queues = vsi->nb_qps;
2250         dev_info->max_tx_queues = vsi->nb_qps;
2251         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2252         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2253         dev_info->max_mac_addrs = vsi->max_macaddrs;
2254         dev_info->max_vfs = dev->pci_dev->max_vfs;
2255         dev_info->rx_offload_capa =
2256                 DEV_RX_OFFLOAD_VLAN_STRIP |
2257                 DEV_RX_OFFLOAD_QINQ_STRIP |
2258                 DEV_RX_OFFLOAD_IPV4_CKSUM |
2259                 DEV_RX_OFFLOAD_UDP_CKSUM |
2260                 DEV_RX_OFFLOAD_TCP_CKSUM;
2261         dev_info->tx_offload_capa =
2262                 DEV_TX_OFFLOAD_VLAN_INSERT |
2263                 DEV_TX_OFFLOAD_QINQ_INSERT |
2264                 DEV_TX_OFFLOAD_IPV4_CKSUM |
2265                 DEV_TX_OFFLOAD_UDP_CKSUM |
2266                 DEV_TX_OFFLOAD_TCP_CKSUM |
2267                 DEV_TX_OFFLOAD_SCTP_CKSUM |
2268                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2269                 DEV_TX_OFFLOAD_TCP_TSO;
2270         dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
2271                                                 sizeof(uint32_t);
2272         dev_info->reta_size = pf->hash_lut_size;
2273         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2274
2275         dev_info->default_rxconf = (struct rte_eth_rxconf) {
2276                 .rx_thresh = {
2277                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
2278                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
2279                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
2280                 },
2281                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2282                 .rx_drop_en = 0,
2283         };
2284
2285         dev_info->default_txconf = (struct rte_eth_txconf) {
2286                 .tx_thresh = {
2287                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
2288                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
2289                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
2290                 },
2291                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2292                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2293                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2294                                 ETH_TXQ_FLAGS_NOOFFLOADS,
2295         };
2296
2297         dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2298                 .nb_max = I40E_MAX_RING_DESC,
2299                 .nb_min = I40E_MIN_RING_DESC,
2300                 .nb_align = I40E_ALIGN_RING_DESC,
2301         };
2302
2303         dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2304                 .nb_max = I40E_MAX_RING_DESC,
2305                 .nb_min = I40E_MIN_RING_DESC,
2306                 .nb_align = I40E_ALIGN_RING_DESC,
2307         };
2308
2309         if (pf->flags & I40E_FLAG_VMDQ) {
2310                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
2311                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
2312                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
2313                                                 pf->max_nb_vmdq_vsi;
2314                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
2315                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
2316                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
2317         }
2318 }
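
/*
 * Usage sketch (illustrative): applications clamp their queue counts to
 * the limits reported here:
 *
 *   struct rte_eth_dev_info info;
 *   rte_eth_dev_info_get(port_id, &info);
 *   uint16_t nb_rxq = RTE_MIN(wanted_rxq, info.max_rx_queues);
 */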
2319
2320 static int
2321 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2322 {
2323         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2324         struct i40e_vsi *vsi = pf->main_vsi;
2325         PMD_INIT_FUNC_TRACE();
2326
2327         if (on)
2328                 return i40e_vsi_add_vlan(vsi, vlan_id);
2329         else
2330                 return i40e_vsi_delete_vlan(vsi, vlan_id);
2331 }
2332
2333 static void
2334 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
2335                    __rte_unused uint16_t tpid)
2336 {
2337         PMD_INIT_FUNC_TRACE();
2338 }
2339
2340 static void
2341 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2342 {
2343         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2344         struct i40e_vsi *vsi = pf->main_vsi;
2345
2346         if (mask & ETH_VLAN_STRIP_MASK) {
2347                 /* Enable or disable VLAN stripping */
2348                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2349                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
2350                 else
2351                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
2352         }
2353
2354         if (mask & ETH_VLAN_EXTEND_MASK) {
2355                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
2356                         i40e_vsi_config_double_vlan(vsi, TRUE);
2357                 else
2358                         i40e_vsi_config_double_vlan(vsi, FALSE);
2359         }
2360 }
2361
2362 static void
2363 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
2364                           __rte_unused uint16_t queue,
2365                           __rte_unused int on)
2366 {
2367         PMD_INIT_FUNC_TRACE();
2368 }
2369
2370 static int
2371 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2372 {
2373         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2374         struct i40e_vsi *vsi = pf->main_vsi;
2375         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
2376         struct i40e_vsi_vlan_pvid_info info;
2377
2378         memset(&info, 0, sizeof(info));
2379         info.on = on;
2380         if (info.on)
2381                 info.config.pvid = pvid;
2382         else {
2383                 info.config.reject.tagged =
2384                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
2385                 info.config.reject.untagged =
2386                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
2387         }
2388
2389         return i40e_vsi_vlan_pvid_set(vsi, &info);
2390 }
2391
2392 static int
2393 i40e_dev_led_on(struct rte_eth_dev *dev)
2394 {
2395         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2396         uint32_t mode = i40e_led_get(hw);
2397
2398         if (mode == 0)
2399                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
2400
2401         return 0;
2402 }
2403
2404 static int
2405 i40e_dev_led_off(struct rte_eth_dev *dev)
2406 {
2407         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2408         uint32_t mode = i40e_led_get(hw);
2409
2410         if (mode != 0)
2411                 i40e_led_set(hw, 0, false);
2412
2413         return 0;
2414 }
2415
2416 static int
2417 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2418 {
2419         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2420         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2421
2422         fc_conf->pause_time = pf->fc_conf.pause_time;
2423         fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
2424         fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
2425
2426         /* Return the current mode according to the actual setting */
2427         switch (hw->fc.current_mode) {
2428         case I40E_FC_FULL:
2429                 fc_conf->mode = RTE_FC_FULL;
2430                 break;
2431         case I40E_FC_TX_PAUSE:
2432                 fc_conf->mode = RTE_FC_TX_PAUSE;
2433                 break;
2434         case I40E_FC_RX_PAUSE:
2435                 fc_conf->mode = RTE_FC_RX_PAUSE;
2436                 break;
2437         case I40E_FC_NONE:
2438         default:
2439                 fc_conf->mode = RTE_FC_NONE;
2440         }
2441
2442         return 0;
2443 }
2444
2445 static int
2446 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2447 {
2448         uint32_t mflcn_reg, fctrl_reg, reg;
2449         uint32_t max_high_water;
2450         uint8_t i, aq_failure;
2451         int err;
2452         struct i40e_hw *hw;
2453         struct i40e_pf *pf;
2454         enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
2455                 [RTE_FC_NONE] = I40E_FC_NONE,
2456                 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
2457                 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
2458                 [RTE_FC_FULL] = I40E_FC_FULL
2459         };
2460
2461         /* The high_water field in rte_eth_fc_conf uses kilobytes as its unit */
2462
2463         max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
2464         if ((fc_conf->high_water > max_high_water) ||
2465                         (fc_conf->high_water < fc_conf->low_water)) {
2466                 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB; "
2467                         "high_water must be <= %d and >= low_water.", max_high_water);
2468                 return -EINVAL;
2469         }
2470
2471         hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2472         pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2473         hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
2474
2475         pf->fc_conf.pause_time = fc_conf->pause_time;
2476         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
2477         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
2478
2479         PMD_INIT_FUNC_TRACE();
2480
2481         /* All the link flow control related enable/disable register
2482          * configuration is handled by the F/W
2483          */
2484         err = i40e_set_fc(hw, &aq_failure, true);
2485         if (err < 0)
2486                 return -ENOSYS;
2487
2488         if (i40e_is_40G_device(hw->device_id)) {
2489                 /* Configure flow control refresh threshold,
2490                  * the value for stat_tx_pause_refresh_timer[8]
2491                  * is used for global pause operation.
2492                  */
2493
2494                 I40E_WRITE_REG(hw,
2495                                I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
2496                                pf->fc_conf.pause_time);
2497
2498                 /* configure the timer value included in transmitted pause
2499                  * frame,
2500                  * the value for stat_tx_pause_quanta[8] is used for global
2501                  * pause operation
2502                  */
2503                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
2504                                pf->fc_conf.pause_time);
2505
2506                 fctrl_reg = I40E_READ_REG(hw,
2507                                           I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
2508
2509                 if (fc_conf->mac_ctrl_frame_fwd != 0)
2510                         fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
2511                 else
2512                         fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
2513
2514                 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
2515                                fctrl_reg);
2516         } else {
2517                 /* Configure pause time (2 TCs per register) */
2518                 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
2519                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
2520                         I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
2521
2522                 /* Configure flow control refresh threshold value */
2523                 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
2524                                pf->fc_conf.pause_time / 2);
2525
2526                 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2527
2528                 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
2529          * depending on configuration
2530                  */
2531                 if (fc_conf->mac_ctrl_frame_fwd != 0) {
2532                         mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
2533                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
2534                 } else {
2535                         mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
2536                         mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
2537                 }
2538
2539                 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
2540         }
2541
2542         /* Configure the watermarks based on both packets and bytes */
2543         I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
2544                        (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2545                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2546         I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
2547                        (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2548                        << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2549         I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
2550                        pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2551                        << I40E_KILOSHIFT);
2552         I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
2553                        pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2554                        << I40E_KILOSHIFT);
2555
2556         I40E_WRITE_FLUSH(hw);
2557
2558         return 0;
2559 }
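/*
 * Illustrative configuration sketch (not part of the driver): an
 * application fills struct rte_eth_fc_conf and calls the generic API.
 * The water marks below are arbitrary example values in KB, chosen only
 * to satisfy the validation at the top of this function
 * (high_water <= I40E_RXPBSIZE >> I40E_KILOSHIFT, low_water <= high_water):
 *
 *     struct rte_eth_fc_conf fc = {
 *             .mode = RTE_FC_FULL,
 *             .pause_time = 0xFFFF,
 *             .high_water = 576,
 *             .low_water = 512,
 *     };
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */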
2560
2561 static int
2562 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
2563                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
2564 {
2565         PMD_INIT_FUNC_TRACE();
2566
2567         return -ENOSYS;
2568 }
2569
2570 /* Add a MAC address, and update filters */
2571 static void
2572 i40e_macaddr_add(struct rte_eth_dev *dev,
2573                  struct ether_addr *mac_addr,
2574                  __rte_unused uint32_t index,
2575                  uint32_t pool)
2576 {
2577         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2578         struct i40e_mac_filter_info mac_filter;
2579         struct i40e_vsi *vsi;
2580         int ret;
2581
2582         /* If VMDQ not enabled or configured, return */
2583         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
2584                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
2585                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
2586                         pool);
2587                 return;
2588         }
2589
2590         if (pool > pf->nb_cfg_vmdq_vsi) {
2591                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
2592                                 pool, pf->nb_cfg_vmdq_vsi);
2593                 return;
2594         }
2595
2596         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
2597         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2598
2599         if (pool == 0)
2600                 vsi = pf->main_vsi;
2601         else
2602                 vsi = pf->vmdq[pool - 1].vsi;
2603
2604         ret = i40e_vsi_add_mac(vsi, &mac_filter);
2605         if (ret != I40E_SUCCESS) {
2606                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
2607                 return;
2608         }
2609 }
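/*
 * Usage sketch (illustrative): pool 0 targets the main VSI, pool N
 * (N >= 1) targets VMDq VSI N-1. With VMDq configured, an application
 * could add a (made-up) locally administered address to pool 1 with:
 *
 *     struct ether_addr addr = {
 *             .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *     rte_eth_dev_mac_addr_add(port_id, &addr, 1);
 */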
2610
2611 /* Remove a MAC address, and update filters */
2612 static void
2613 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2614 {
2615         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2616         struct i40e_vsi *vsi;
2617         struct rte_eth_dev_data *data = dev->data;
2618         struct ether_addr *macaddr;
2619         int ret;
2620         uint32_t i;
2621         uint64_t pool_sel;
2622
2623         macaddr = &(data->mac_addrs[index]);
2624
2625         pool_sel = dev->data->mac_pool_sel[index];
2626
2627         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
2628                 if (pool_sel & (1ULL << i)) {
2629                         if (i == 0)
2630                                 vsi = pf->main_vsi;
2631                         else {
2632                                 /* No VMDQ pool enabled or configured */
2633                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
2634                                         (i > pf->nb_cfg_vmdq_vsi)) {
2635                                         PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
2636                                                         "/configured");
2637                                         return;
2638                                 }
2639                                 vsi = pf->vmdq[i - 1].vsi;
2640                         }
2641                         ret = i40e_vsi_delete_mac(vsi, macaddr);
2642
2643                         if (ret) {
2644                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
2645                                 return;
2646                         }
2647                 }
2648         }
2649 }
2650
2651 /* Set perfect match or hash match of MAC and VLAN for a VF */
2652 static int
2653 i40e_vf_mac_filter_set(struct i40e_pf *pf,
2654                  struct rte_eth_mac_filter *filter,
2655                  bool add)
2656 {
2657         struct i40e_hw *hw;
2658         struct i40e_mac_filter_info mac_filter;
2659         struct ether_addr old_mac;
2660         struct ether_addr *new_mac;
2661         struct i40e_pf_vf *vf = NULL;
2662         uint16_t vf_id;
2663         int ret;
2664
2665         if (pf == NULL) {
2666                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
2667                 return -EINVAL;
2668         }
2669         hw = I40E_PF_TO_HW(pf);
2670
2671         if (filter == NULL) {
2672                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
2673                 return -EINVAL;
2674         }
2675
2676         new_mac = &filter->mac_addr;
2677
2678         if (is_zero_ether_addr(new_mac)) {
2679                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
2680                 return -EINVAL;
2681         }
2682
2683         vf_id = filter->dst_id;
2684
2685         if (!pf->vfs || vf_id >= pf->vf_num) {
2686                 PMD_DRV_LOG(ERR, "Invalid argument.");
2687                 return -EINVAL;
2688         }
2689         vf = &pf->vfs[vf_id];
2690
2691         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
2692                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
2693                 return -EINVAL;
2694         }
2695
2696         if (add) {
2697                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
2698                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
2699                                 ETHER_ADDR_LEN);
2700                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
2701                                  ETHER_ADDR_LEN);
2702
2703                 mac_filter.filter_type = filter->filter_type;
2704                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
2705                 if (ret != I40E_SUCCESS) {
2706                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
2707                         return -1;
2708                 }
2709                 ether_addr_copy(new_mac, &pf->dev_addr);
2710         } else {
2711                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
2712                                 ETHER_ADDR_LEN);
2713                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
2714                 if (ret != I40E_SUCCESS) {
2715                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
2716                         return -1;
2717                 }
2718
2719                 /* Clear device address as it has been removed */
2720                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
2721                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
2722         }
2723
2724         return 0;
2725 }
2726
2727 /* MAC filter handle */
2728 static int
2729 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
2730                 void *arg)
2731 {
2732         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2733         struct rte_eth_mac_filter *filter;
2734         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2735         int ret = I40E_NOT_SUPPORTED;
2736
2737         filter = (struct rte_eth_mac_filter *)(arg);
2738
2739         switch (filter_op) {
2740         case RTE_ETH_FILTER_NOP:
2741                 ret = I40E_SUCCESS;
2742                 break;
2743         case RTE_ETH_FILTER_ADD:
2744                 i40e_pf_disable_irq0(hw);
2745                 if (filter->is_vf)
2746                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
2747                 i40e_pf_enable_irq0(hw);
2748                 break;
2749         case RTE_ETH_FILTER_DELETE:
2750                 i40e_pf_disable_irq0(hw);
2751                 if (filter->is_vf)
2752                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
2753                 i40e_pf_enable_irq0(hw);
2754                 break;
2755         default:
2756                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
2757                 ret = I40E_ERR_PARAM;
2758                 break;
2759         }
2760
2761         return ret;
2762 }
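/*
 * Illustrative sketch (not part of the driver): this handler is reached
 * through the generic filter-control API with the MACVLAN filter type,
 * e.g. to add a perfect-match filter for VF 0 (mf.mac_addr filled in by
 * the caller):
 *
 *     struct rte_eth_mac_filter mf = {
 *             .is_vf = 1,
 *             .dst_id = 0,
 *             .filter_type = RTE_MAC_PERFECT_MATCH,
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_MACVLAN,
 *                             RTE_ETH_FILTER_ADD, &mf);
 */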
2763
2764 static int
2765 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2766 {
2767         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2768         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2769         int ret;
2770
2771         if (!lut)
2772                 return -EINVAL;
2773
2774         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2775                 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
2776                                           lut, lut_size);
2777                 if (ret) {
2778                         PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2779                         return ret;
2780                 }
2781         } else {
2782                 uint32_t *lut_dw = (uint32_t *)lut;
2783                 uint16_t i, lut_size_dw = lut_size / 4;
2784
2785                 for (i = 0; i < lut_size_dw; i++)
2786                         lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
2787         }
2788
2789         return 0;
2790 }
2791
2792 static int
2793 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2794 {
2795         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2796         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2797         int ret;
2798
2799         if (!vsi || !lut)
2800                 return -EINVAL;
2801
2802         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2803                 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
2804                                           lut, lut_size);
2805                 if (ret) {
2806                         PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2807                         return ret;
2808                 }
2809         } else {
2810                 uint32_t *lut_dw = (uint32_t *)lut;
2811                 uint16_t i, lut_size_dw = lut_size / 4;
2812
2813                 for (i = 0; i < lut_size_dw; i++)
2814                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
2815                 I40E_WRITE_FLUSH(hw);
2816         }
2817
2818         return 0;
2819 }
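/*
 * Note on the register fallback above (layout sketch): each 32-bit
 * I40E_PFQF_HLUT register holds four consecutive LUT entries, one per
 * byte, so a lut_size-byte table spans lut_size / 4 registers and the
 * byte array can be accessed through a uint32_t view. For a 128-entry
 * table: lut[0..3] map to I40E_PFQF_HLUT(0), ..., lut[124..127] map to
 * I40E_PFQF_HLUT(31).
 */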
2820
2821 static int
2822 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
2823                          struct rte_eth_rss_reta_entry64 *reta_conf,
2824                          uint16_t reta_size)
2825 {
2826         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2827         uint16_t i, lut_size = pf->hash_lut_size;
2828         uint16_t idx, shift;
2829         uint8_t *lut;
2830         int ret;
2831
2832         if (reta_size != lut_size ||
2833                 reta_size > ETH_RSS_RETA_SIZE_512) {
2834                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2835                         "(%d) doesn't match what the hardware can support "
2836                         "(%d)\n", reta_size, lut_size);
2837                 return -EINVAL;
2838         }
2839
2840         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2841         if (!lut) {
2842                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2843                 return -ENOMEM;
2844         }
2845         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
2846         if (ret)
2847                 goto out;
2848         for (i = 0; i < reta_size; i++) {
2849                 idx = i / RTE_RETA_GROUP_SIZE;
2850                 shift = i % RTE_RETA_GROUP_SIZE;
2851                 if (reta_conf[idx].mask & (1ULL << shift))
2852                         lut[i] = reta_conf[idx].reta[shift];
2853         }
2854         ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
2855
2856 out:
2857         rte_free(lut);
2858
2859         return ret;
2860 }
2861
2862 static int
2863 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
2864                         struct rte_eth_rss_reta_entry64 *reta_conf,
2865                         uint16_t reta_size)
2866 {
2867         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2868         uint16_t i, lut_size = pf->hash_lut_size;
2869         uint16_t idx, shift;
2870         uint8_t *lut;
2871         int ret;
2872
2873         if (reta_size != lut_size ||
2874                 reta_size > ETH_RSS_RETA_SIZE_512) {
2875                 PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
2876                         "(%d) doesn't match what the hardware can support "
2877                         "(%d)\n", reta_size, lut_size);
2878                 return -EINVAL;
2879         }
2880
2881         lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2882         if (!lut) {
2883                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2884                 return -ENOMEM;
2885         }
2886
2887         ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
2888         if (ret)
2889                 goto out;
2890         for (i = 0; i < reta_size; i++) {
2891                 idx = i / RTE_RETA_GROUP_SIZE;
2892                 shift = i % RTE_RETA_GROUP_SIZE;
2893                 if (reta_conf[idx].mask & (1ULL << shift))
2894                         reta_conf[idx].reta[shift] = lut[i];
2895         }
2896
2897 out:
2898         rte_free(lut);
2899
2900         return ret;
2901 }
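/*
 * Illustrative RETA update sketch (not part of the driver), assuming a
 * 512-entry table (pf->hash_lut_size == ETH_RSS_RETA_SIZE_512), i.e.
 * 512 / RTE_RETA_GROUP_SIZE = 8 groups, spread round-robin over the
 * application's RX queue count (nb_rx_queues is an assumed variable):
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[8];
 *     uint16_t i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < 512; i++) {
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i %
 *                     RTE_RETA_GROUP_SIZE] = i % nb_rx_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, 512);
 */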
2902
2903 /**
2904  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
2905  * @hw:   pointer to the HW structure
2906  * @mem:  pointer to mem struct to fill out
2907  * @size: size of memory requested
2908  * @alignment: what to align the allocation to
2909  **/
2910 enum i40e_status_code
2911 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2912                         struct i40e_dma_mem *mem,
2913                         u64 size,
2914                         u32 alignment)
2915 {
2916         const struct rte_memzone *mz = NULL;
2917         char z_name[RTE_MEMZONE_NAMESIZE];
2918
2919         if (!mem)
2920                 return I40E_ERR_PARAM;
2921
2922         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
2923         mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
2924                                          alignment, RTE_PGSIZE_2M);
2925         if (!mz)
2926                 return I40E_ERR_NO_MEMORY;
2927
2928         mem->size = size;
2929         mem->va = mz->addr;
2930         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2931         mem->zone = (const void *)mz;
2932         PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
2933                     "%"PRIu64, mz->name, mem->pa);
2934
2935         return I40E_SUCCESS;
2936 }
2937
2938 /**
2939  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
2940  * @hw:   pointer to the HW structure
2941  * @mem:  ptr to mem struct to free
2942  **/
2943 enum i40e_status_code
2944 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2945                     struct i40e_dma_mem *mem)
2946 {
2947         if (!mem)
2948                 return I40E_ERR_PARAM;
2949
2950         PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
2951                     "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
2952                     mem->pa);
2953         rte_memzone_free((const struct rte_memzone *)mem->zone);
2954         mem->zone = NULL;
2955         mem->va = NULL;
2956         mem->pa = (u64)0;
2957
2958         return I40E_SUCCESS;
2959 }
2960
2961 /**
2962  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
2963  * @hw:   pointer to the HW structure
2964  * @mem:  pointer to mem struct to fill out
2965  * @size: size of memory requested
2966  **/
2967 enum i40e_status_code
2968 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2969                          struct i40e_virt_mem *mem,
2970                          u32 size)
2971 {
2972         if (!mem)
2973                 return I40E_ERR_PARAM;
2974
2975         mem->size = size;
2976         mem->va = rte_zmalloc("i40e", size, 0);
2977
2978         if (mem->va)
2979                 return I40E_SUCCESS;
2980         else
2981                 return I40E_ERR_NO_MEMORY;
2982 }
2983
2984 /**
2985  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
2986  * @hw:   pointer to the HW structure
2987  * @mem:  pointer to mem struct to free
2988  **/
2989 enum i40e_status_code
2990 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2991                      struct i40e_virt_mem *mem)
2992 {
2993         if (!mem)
2994                 return I40E_ERR_PARAM;
2995
2996         rte_free(mem->va);
2997         mem->va = NULL;
2998
2999         return I40E_SUCCESS;
3000 }
3001
3002 void
3003 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3004 {
3005         rte_spinlock_init(&sp->spinlock);
3006 }
3007
3008 void
3009 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3010 {
3011         rte_spinlock_lock(&sp->spinlock);
3012 }
3013
3014 void
3015 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3016 {
3017         rte_spinlock_unlock(&sp->spinlock);
3018 }
3019
3020 void
3021 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3022 {
3023         return;
3024 }
3025
3026 /**
3027  * Get the hardware capabilities, which will be parsed
3028  * and saved into struct i40e_hw.
3029  */
3030 static int
3031 i40e_get_cap(struct i40e_hw *hw)
3032 {
3033         struct i40e_aqc_list_capabilities_element_resp *buf;
3034         uint16_t len, size = 0;
3035         int ret;
3036
3037         /* Calculate a buffer large enough to temporarily hold the response data */
3038         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3039                                                 I40E_MAX_CAP_ELE_NUM;
3040         buf = rte_zmalloc("i40e", len, 0);
3041         if (!buf) {
3042                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3043                 return I40E_ERR_NO_MEMORY;
3044         }
3045
3046         /* Get and parse the capabilities, then save them to hw */
3047         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3048                         i40e_aqc_opc_list_func_capabilities, NULL);
3049         if (ret != I40E_SUCCESS)
3050                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3051
3052         /* Free the temporary buffer after being used */
3053         rte_free(buf);
3054
3055         return ret;
3056 }
3057
3058 static int
3059 i40e_pf_parameter_init(struct rte_eth_dev *dev)
3060 {
3061         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3062         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3063         uint16_t qp_count = 0, vsi_count = 0;
3064
3065         if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
3066                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
3067                 return -EINVAL;
3068         }
3069         /* Add the parameter init for LFC */
3070         pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
3071         pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
3072         pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
3073
3074         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
3075         pf->max_num_vsi = hw->func_caps.num_vsis;
3076         pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
3077         pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
3078         pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3079
3080         /* FDir queue/VSI allocation */
3081         pf->fdir_qp_offset = 0;
3082         if (hw->func_caps.fd) {
3083                 pf->flags |= I40E_FLAG_FDIR;
3084                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
3085         } else {
3086                 pf->fdir_nb_qps = 0;
3087         }
3088         qp_count += pf->fdir_nb_qps;
3089         vsi_count += 1;
3090
3091         /* LAN queue/VSI allocation */
3092         pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
3093         if (!hw->func_caps.rss) {
3094                 pf->lan_nb_qps = 1;
3095         } else {
3096                 pf->flags |= I40E_FLAG_RSS;
3097                 if (hw->mac.type == I40E_MAC_X722)
3098                         pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
3099                 pf->lan_nb_qps = pf->lan_nb_qp_max;
3100         }
3101         qp_count += pf->lan_nb_qps;
3102         vsi_count += 1;
3103
3104         /* VF queue/VSI allocation */
3105         pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
3106         if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
3107                 pf->flags |= I40E_FLAG_SRIOV;
3108                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3109                 pf->vf_num = dev->pci_dev->max_vfs;
3110                 PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
3111                             "in total %u queues", pf->vf_num, pf->vf_nb_qps,
3112                             pf->vf_nb_qps * pf->vf_num);
3113         } else {
3114                 pf->vf_nb_qps = 0;
3115                 pf->vf_num = 0;
3116         }
3117         qp_count += pf->vf_nb_qps * pf->vf_num;
3118         vsi_count += pf->vf_num;
3119
3120         /* VMDq queue/VSI allocation */
3121         pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
3122         pf->vmdq_nb_qps = 0;
3123         pf->max_nb_vmdq_vsi = 0;
3124         if (hw->func_caps.vmdq) {
3125                 if (qp_count < hw->func_caps.num_tx_qp &&
3126                         vsi_count < hw->func_caps.num_vsis) {
3127                         pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
3128                                 qp_count) / pf->vmdq_nb_qp_max;
3129
3130                         /* Limit the maximum number of VMDq vsi to the maximum
3131                          * ethdev can support
3132                          */
3133                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3134                                 hw->func_caps.num_vsis - vsi_count);
3135                         pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3136                                 ETH_64_POOLS);
3137                         if (pf->max_nb_vmdq_vsi) {
3138                                 pf->flags |= I40E_FLAG_VMDQ;
3139                                 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
3140                                 PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
3141                                             "per VMDQ VSI, in total %u queues",
3142                                             pf->max_nb_vmdq_vsi,
3143                                             pf->vmdq_nb_qps, pf->vmdq_nb_qps *
3144                                             pf->max_nb_vmdq_vsi);
3145                         } else {
3146                                 PMD_DRV_LOG(INFO, "Not enough queues left "
3147                                             "for VMDq");
3148                         }
3149                 } else {
3150                         PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
3151                 }
3152         }
3153         qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
3154         vsi_count += pf->max_nb_vmdq_vsi;
3155
3156         if (hw->func_caps.dcb)
3157                 pf->flags |= I40E_FLAG_DCB;
3158
3159         if (qp_count > hw->func_caps.num_tx_qp) {
3160                 PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
3161                             "the hardware maximum %u", qp_count,
3162                             hw->func_caps.num_tx_qp);
3163                 return -EINVAL;
3164         }
3165         if (vsi_count > hw->func_caps.num_vsis) {
3166                 PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
3167                             "the hardware maximum %u", vsi_count,
3168                             hw->func_caps.num_vsis);
3169                 return -EINVAL;
3170         }
3171
3172         return 0;
3173 }
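/*
 * Worked example of the queue/VSI budget above (hypothetical capacities):
 * with func_caps.num_tx_qp = 128, FDIR enabled (1 queue, 1 VSI),
 * lan_nb_qps = 64, 4 VFs with 4 queues each and vmdq_nb_qp_max = 4,
 * the offsets become
 *
 *     fdir_qp_offset = 0, lan_qp_offset = 1,
 *     vf_qp_offset = 65, vmdq_qp_offset = 81,
 *
 * leaving (128 - 81) / 4 = 11 candidate VMDq VSIs before the num_vsis
 * and ETH_64_POOLS caps are applied.
 */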
3174
3175 static int
3176 i40e_pf_get_switch_config(struct i40e_pf *pf)
3177 {
3178         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3179         struct i40e_aqc_get_switch_config_resp *switch_config;
3180         struct i40e_aqc_switch_config_element_resp *element;
3181         uint16_t start_seid = 0, num_reported;
3182         int ret;
3183
3184         switch_config = (struct i40e_aqc_get_switch_config_resp *)
3185                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
3186         if (!switch_config) {
3187                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3188                 return -ENOMEM;
3189         }
3190
3191         /* Get the switch configurations */
3192         ret = i40e_aq_get_switch_config(hw, switch_config,
3193                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
3194         if (ret != I40E_SUCCESS) {
3195                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
3196                 goto fail;
3197         }
3198         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
3199         if (num_reported != 1) { /* The number should be 1 */
3200                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
3201                 goto fail;
3202         }
3203
3204         /* Parse the switch configuration elements */
3205         element = &(switch_config->element[0]);
3206         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
3207                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
3208                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
3209         } else
3210                 PMD_DRV_LOG(INFO, "Unknown element type");
3211
3212 fail:
3213         rte_free(switch_config);
3214
3215         return ret;
3216 }
3217
3218 static int
3219 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
3220                         uint32_t num)
3221 {
3222         struct pool_entry *entry;
3223
3224         if (pool == NULL || num == 0)
3225                 return -EINVAL;
3226
3227         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
3228         if (entry == NULL) {
3229                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
3230                 return -ENOMEM;
3231         }
3232
3233         /* Initialize the queue heap */
3234         pool->num_free = num;
3235         pool->num_alloc = 0;
3236         pool->base = base;
3237         LIST_INIT(&pool->alloc_list);
3238         LIST_INIT(&pool->free_list);
3239
3240         /* Initialize the element */
3241         entry->base = 0;
3242         entry->len = num;
3243
3244         LIST_INSERT_HEAD(&pool->free_list, entry, next);
3245         return 0;
3246 }
3247
3248 static void
3249 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
3250 {
3251         struct pool_entry *entry;
3252
3253         if (pool == NULL)
3254                 return;
3255
3256         while ((entry = LIST_FIRST(&pool->alloc_list)) != NULL) {
3257                 LIST_REMOVE(entry, next);
3258                 rte_free(entry);
3259         }
3260
3261         while ((entry = LIST_FIRST(&pool->free_list)) != NULL) {
3262                 LIST_REMOVE(entry, next);
3263                 rte_free(entry);
3264         }
3265
3266         pool->num_free = 0;
3267         pool->num_alloc = 0;
3268         pool->base = 0;
3269         LIST_INIT(&pool->alloc_list);
3270         LIST_INIT(&pool->free_list);
3271 }
3272
3273 static int
3274 i40e_res_pool_free(struct i40e_res_pool_info *pool,
3275                        uint32_t base)
3276 {
3277         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
3278         uint32_t pool_offset;
3279         int insert;
3280
3281         if (pool == NULL) {
3282                 PMD_DRV_LOG(ERR, "Invalid parameter");
3283                 return -EINVAL;
3284         }
3285
3286         pool_offset = base - pool->base;
3287         /* Lookup in alloc list */
3288         LIST_FOREACH(entry, &pool->alloc_list, next) {
3289                 if (entry->base == pool_offset) {
3290                         valid_entry = entry;
3291                         LIST_REMOVE(entry, next);
3292                         break;
3293                 }
3294         }
3295
3296         /* Not found, return */
3297         if (valid_entry == NULL) {
3298                 PMD_DRV_LOG(ERR, "Failed to find entry");
3299                 return -EINVAL;
3300         }
3301
3302         /**
3303          * Found it; move it to the free list and try to merge.
3304          * To make merging easier, the free list is always sorted by base.
3305          * Find the adjacent prev and next entries.
3306          */
3307         prev = next = NULL;
3308         LIST_FOREACH(entry, &pool->free_list, next) {
3309                 if (entry->base > valid_entry->base) {
3310                         next = entry;
3311                         break;
3312                 }
3313                 prev = entry;
3314         }
3315
3316         insert = 0;
3317         /* Try to merge with the next one */
3318         if (next != NULL) {
3319                 /* Merge with next one */
3320                 if (valid_entry->base + valid_entry->len == next->base) {
3321                         next->base = valid_entry->base;
3322                         next->len += valid_entry->len;
3323                         rte_free(valid_entry);
3324                         valid_entry = next;
3325                         insert = 1;
3326                 }
3327         }
3328
3329         if (prev != NULL) {
3330                 /* Merge with previous one */
3331                 if (prev->base + prev->len == valid_entry->base) {
3332                         prev->len += valid_entry->len;
3333                         /* If it already merged with the next one, remove the next node */
3334                         if (insert == 1) {
3335                                 LIST_REMOVE(valid_entry, next);
3336                                 rte_free(valid_entry);
3337                         } else {
3338                                 rte_free(valid_entry);
3339                                 insert = 1;
3340                         }
3341                 }
3342         }
3343
3344         /* Nothing was merged, insert the entry */
3345         if (insert == 0) {
3346                 if (prev != NULL)
3347                         LIST_INSERT_AFTER(prev, valid_entry, next);
3348                 else if (next != NULL)
3349                         LIST_INSERT_BEFORE(next, valid_entry, next);
3350                 else /* It's empty list, insert to head */
3351                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
3352         }
3353
3354         pool->num_free += valid_entry->len;
3355         pool->num_alloc -= valid_entry->len;
3356
3357         return 0;
3358 }
3359
3360 static int
3361 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
3362                        uint16_t num)
3363 {
3364         struct pool_entry *entry, *valid_entry;
3365
3366         if (pool == NULL || num == 0) {
3367                 PMD_DRV_LOG(ERR, "Invalid parameter");
3368                 return -EINVAL;
3369         }
3370
3371         if (pool->num_free < num) {
3372                 PMD_DRV_LOG(ERR, "Not enough resources: asked %u, available %u",
3373                             num, pool->num_free);
3374                 return -ENOMEM;
3375         }
3376
3377         valid_entry = NULL;
3378         /* Look up the free list and find the best-fit entry */
3379         LIST_FOREACH(entry, &pool->free_list, next) {
3380                 if (entry->len >= num) {
3381                         /* Find best one */
3382                         if (entry->len == num) {
3383                                 valid_entry = entry;
3384                                 break;
3385                         }
3386                         if (valid_entry == NULL || valid_entry->len > entry->len)
3387                                 valid_entry = entry;
3388                 }
3389         }
3390
3391         /* No entry can satisfy the request, return */
3392         if (valid_entry == NULL) {
3393                 PMD_DRV_LOG(ERR, "No valid entry found");
3394                 return -ENOMEM;
3395         }
3396         /**
3397          * The entry has exactly the number of queues requested;
3398          * remove it from the free list.
3399          */
3400         if (valid_entry->len == num) {
3401                 LIST_REMOVE(valid_entry, next);
3402         } else {
3403                 /**
3404                  * The entry has more queues than requested;
3405                  * create a new entry for the alloc list and advance
3406                  * the base and shrink the length of the free-list entry.
3407                  */
3408                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
3409                 if (entry == NULL) {
3410                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3411                                     "resource pool");
3412                         return -ENOMEM;
3413                 }
3414                 entry->base = valid_entry->base;
3415                 entry->len = num;
3416                 valid_entry->base += num;
3417                 valid_entry->len -= num;
3418                 valid_entry = entry;
3419         }
3420
3421         /* Insert it into alloc list, not sorted */
3422         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
3423
3424         pool->num_free -= valid_entry->len;
3425         pool->num_alloc += valid_entry->len;
3426
3427         return valid_entry->base + pool->base;
3428 }
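/*
 * Worked example (hypothetical state): with pool->base = 0 and a free
 * list of {base 0, len 4} and {base 8, len 2}, i40e_res_pool_alloc(pool, 2)
 * picks the exact-fit entry {8, 2} and returns 8. A later
 * i40e_res_pool_free(pool, 8) moves it back to the free list; since
 * 0 + 4 != 8 it cannot merge with {0, 4} and is re-inserted after it,
 * keeping the list sorted by base.
 */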
3429
3430 /**
3431  * bitmap_is_subset - Check whether src2 is subset of src1
3432  **/
3433 static inline int
3434 bitmap_is_subset(uint8_t src1, uint8_t src2)
3435 {
3436         return !((src1 ^ src2) & src2);
3437 }
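/*
 * Example: src1 = 0x0b, src2 = 0x02 gives (src1 ^ src2) & src2 == 0, so
 * src2 is a subset; src2 = 0x04 gives (0x0f & 0x04) != 0, so it is not.
 */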
3438
3439 static enum i40e_status_code
3440 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3441 {
3442         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3443
3444         /* If DCB is not supported, only default TC is supported */
3445         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
3446                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
3447                 return I40E_NOT_SUPPORTED;
3448         }
3449
3450         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
3451                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
3452                             "HW support 0x%x", hw->func_caps.enabled_tcmap,
3453                             enabled_tcmap);
3454                 return I40E_NOT_SUPPORTED;
3455         }
3456         return I40E_SUCCESS;
3457 }
3458
3459 int
3460 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
3461                                 struct i40e_vsi_vlan_pvid_info *info)
3462 {
3463         struct i40e_hw *hw;
3464         struct i40e_vsi_context ctxt;
3465         uint8_t vlan_flags = 0;
3466         int ret;
3467
3468         if (vsi == NULL || info == NULL) {
3469                 PMD_DRV_LOG(ERR, "invalid parameters");
3470                 return I40E_ERR_PARAM;
3471         }
3472
3473         if (info->on) {
3474                 vsi->info.pvid = info->config.pvid;
3475                 /**
3476                  * If insert pvid is enabled, only tagged pkts are
3477                  * allowed to be sent out.
3478                  */
3479                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
3480                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3481         } else {
3482                 vsi->info.pvid = 0;
3483                 if (info->config.reject.tagged == 0)
3484                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3485
3486                 if (info->config.reject.untagged == 0)
3487                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
3488         }
3489         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
3490                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
3491         vsi->info.port_vlan_flags |= vlan_flags;
3492         vsi->info.valid_sections =
3493                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3494         memset(&ctxt, 0, sizeof(ctxt));
3495         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3496         ctxt.seid = vsi->seid;
3497
3498         hw = I40E_VSI_TO_HW(vsi);
3499         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3500         if (ret != I40E_SUCCESS)
3501                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
3502
3503         return ret;
3504 }
3505
3506 static int
3507 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3508 {
3509         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3510         int i, ret;
3511         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
3512
3513         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3514         if (ret != I40E_SUCCESS)
3515                 return ret;
3516
3517         if (!vsi->seid) {
3518                 PMD_DRV_LOG(ERR, "seid not valid");
3519                 return -EINVAL;
3520         }
3521
3522         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
3523         tc_bw_data.tc_valid_bits = enabled_tcmap;
3524         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3525                 tc_bw_data.tc_bw_credits[i] =
3526                         (enabled_tcmap & (1 << i)) ? 1 : 0;
3527
3528         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
3529         if (ret != I40E_SUCCESS) {
3530                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
3531                 return ret;
3532         }
3533
3534         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
3535                                         sizeof(vsi->info.qs_handle));
3536         return I40E_SUCCESS;
3537 }
3538
3539 static enum i40e_status_code
3540 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
3541                                  struct i40e_aqc_vsi_properties_data *info,
3542                                  uint8_t enabled_tcmap)
3543 {
3544         enum i40e_status_code ret;
3545         int i, total_tc = 0;
3546         uint16_t qpnum_per_tc, bsf, qp_idx;
3547
3548         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3549         if (ret != I40E_SUCCESS)
3550                 return ret;
3551
3552         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3553                 if (enabled_tcmap & (1 << i))
3554                         total_tc++;
3555         vsi->enabled_tc = enabled_tcmap;
3556
3557         /* Number of queues per enabled TC */
3558         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
3559         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
3560         bsf = rte_bsf32(qpnum_per_tc);
3561
3562         /* Adjust the queue number to actual queues that can be applied */
3563         if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
3564                 vsi->nb_qps = qpnum_per_tc * total_tc;
3565
3566         /**
3567          * Configure TC and queue mapping parameters, for enabled TC,
3568          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
3569          * default queue will serve it.
3570          */
3571         qp_idx = 0;
3572         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3573                 if (vsi->enabled_tc & (1 << i)) {
3574                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
3575                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
3576                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
3577                         qp_idx += qpnum_per_tc;
3578                 } else
3579                         info->tc_mapping[i] = 0;
3580         }
3581
3582         /* Associate queue number with VSI */
3583         if (vsi->type == I40E_VSI_SRIOV) {
3584                 info->mapping_flags |=
3585                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
3586                 for (i = 0; i < vsi->nb_qps; i++)
3587                         info->queue_mapping[i] =
3588                                 rte_cpu_to_le_16(vsi->base_queue + i);
3589         } else {
3590                 info->mapping_flags |=
3591                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
3592                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
3593         }
3594         info->valid_sections |=
3595                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
3596
3597         return I40E_SUCCESS;
3598 }
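/*
 * Worked example of the mapping math (hypothetical numbers): with
 * nb_qps = 16 and enabled_tcmap = 0x3 (TC0 and TC1), total_tc = 2,
 * qpnum_per_tc = 8 and bsf = rte_bsf32(8) = 3, so
 *
 *     tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                     (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *     tc_mapping[1] = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                     (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *
 * i.e. each enabled TC owns 2^bsf = 8 queues starting at its offset.
 */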
3599
3600 static int
3601 i40e_veb_release(struct i40e_veb *veb)
3602 {
3603         struct i40e_vsi *vsi;
3604         struct i40e_hw *hw;
3605
3606         if (veb == NULL || veb->associate_vsi == NULL)
3607                 return -EINVAL;
3608
3609         if (!TAILQ_EMPTY(&veb->head)) {
3610                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
3611                 return -EACCES;
3612         }
3613
3614         vsi = veb->associate_vsi;
3615         hw = I40E_VSI_TO_HW(vsi);
3616
3617         vsi->uplink_seid = veb->uplink_seid;
3618         i40e_aq_delete_element(hw, veb->seid, NULL);
3619         rte_free(veb);
3620         vsi->veb = NULL;
3621         return I40E_SUCCESS;
3622 }
3623
3624 /* Setup a veb */
3625 static struct i40e_veb *
3626 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
3627 {
3628         struct i40e_veb *veb;
3629         int ret;
3630         struct i40e_hw *hw;
3631
3632         if (pf == NULL || vsi == NULL) {
3633                 PMD_DRV_LOG(ERR, "VEB setup failed, "
3634                             "associated VSI shouldn't be NULL");
3635                 return NULL;
3636         }
3637         hw = I40E_PF_TO_HW(pf);
3638
3639         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
3640         if (!veb) {
3641                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
3642                 goto fail;
3643         }
3644
3645         veb->associate_vsi = vsi;
3646         TAILQ_INIT(&veb->head);
3647         veb->uplink_seid = vsi->uplink_seid;
3648
3649         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
3650                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
3651
3652         if (ret != I40E_SUCCESS) {
3653                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
3654                             hw->aq.asq_last_status);
3655                 goto fail;
3656         }
3657
3658         /* get statistics index */
3659         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
3660                                 &veb->stats_idx, NULL, NULL, NULL);
3661         if (ret != I40E_SUCCESS) {
3662                 PMD_DRV_LOG(ERR, "Get VEB statistics index failed, aq_err: %d",
3663                             hw->aq.asq_last_status);
3664                 goto fail;
3665         }
3666
3667         /* Get VEB bandwidth, to be implemented */
3668         /* Now associated vsi binding to the VEB, set uplink to this VEB */
3669         vsi->uplink_seid = veb->seid;
3670
3671         return veb;
3672 fail:
3673         rte_free(veb);
3674         return NULL;
3675 }
3676
3677 int
3678 i40e_vsi_release(struct i40e_vsi *vsi)
3679 {
3680         struct i40e_pf *pf;
3681         struct i40e_hw *hw;
3682         struct i40e_vsi_list *vsi_list;
3683         int ret;
3684         struct i40e_mac_filter *f;
3685
3686         if (!vsi)
3687                 return I40E_SUCCESS;
3688
3689         pf = I40E_VSI_TO_PF(vsi);
3690         hw = I40E_VSI_TO_HW(vsi);
3691
3692         /* VSI has child to attach, release child first */
3693         if (vsi->veb) {
3694                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
3695                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
3696                                 return -1;
3697                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
3698                 }
3699                 i40e_veb_release(vsi->veb);
3700         }
3701
3702         /* Remove all macvlan filters of the VSI */
3703         i40e_vsi_remove_all_macvlan_filter(vsi);
3704         while ((f = TAILQ_FIRST(&vsi->mac_list)) != NULL) {
3705                 TAILQ_REMOVE(&vsi->mac_list, f, next); rte_free(f); }
3706
3707         if (vsi->type != I40E_VSI_MAIN) {
3708                 /* Remove vsi from parent's sibling list */
3709                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
3710                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
3711                         return I40E_ERR_PARAM;
3712                 }
3713                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
3714                                 &vsi->sib_vsi_list, list);
3715
3716                 /* Remove all switch element of the VSI */
3717                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
3718                 if (ret != I40E_SUCCESS)
3719                         PMD_DRV_LOG(ERR, "Failed to delete element");
3720         }
3721         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
3722
3723         if (vsi->type != I40E_VSI_SRIOV)
3724                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
3725         rte_free(vsi);
3726
3727         return I40E_SUCCESS;
3728 }
3729
3730 static int
3731 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
3732 {
3733         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3734         struct i40e_aqc_remove_macvlan_element_data def_filter;
3735         struct i40e_mac_filter_info filter;
3736         int ret;
3737
3738         if (vsi->type != I40E_VSI_MAIN)
3739                 return I40E_ERR_CONFIG;
3740         memset(&def_filter, 0, sizeof(def_filter));
3741         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
3742                                         ETH_ADDR_LEN);
3743         def_filter.vlan_tag = 0;
3744         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
3745                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3746         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
3747         if (ret != I40E_SUCCESS) {
3748                 struct i40e_mac_filter *f;
3749                 struct ether_addr *mac;
3750
3751                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
3752                             "macvlan filter");
3753                 /* Add the permanent MAC back into the MAC list */
3754                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3755                 if (f == NULL) {
3756                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3757                         return I40E_ERR_NO_MEMORY;
3758                 }
3759                 mac = &f->mac_info.mac_addr;
3760                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
3761                                 ETH_ADDR_LEN);
3762                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3763                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3764                 vsi->mac_num++;
3765
3766                 return ret;
3767         }
3768         (void)rte_memcpy(&filter.mac_addr,
3769                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
3770         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3771         return i40e_vsi_add_mac(vsi, &filter);
3772 }
3773
3774 #define I40E_3_BIT_MASK     0x7
3775 /*
3776  * i40e_vsi_get_bw_config - Query VSI BW Information
3777  * @vsi: the VSI to be queried
3778  *
3779  * Returns 0 on success, negative value on failure
3780  */
3781 static enum i40e_status_code
3782 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
3783 {
3784         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
3785         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
3786         struct i40e_hw *hw = &vsi->adapter->hw;
3787         i40e_status ret;
3788         int i;
3789         uint32_t bw_max;
3790
3791         memset(&bw_config, 0, sizeof(bw_config));
3792         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3793         if (ret != I40E_SUCCESS) {
3794                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
3795                             hw->aq.asq_last_status);
3796                 return ret;
3797         }
3798
3799         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
3800         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
3801                                         &ets_sla_config, NULL);
3802         if (ret != I40E_SUCCESS) {
3803                 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
3804                             "configuration %u", hw->aq.asq_last_status);
3805                 return ret;
3806         }
3807
3808         /* store and print out BW info */
3809         vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
3810         vsi->bw_info.bw_max = bw_config.max_bw;
3811         PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
3812         PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
3813         bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
3814                     (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
3815                      I40E_16_BIT_WIDTH);
3816         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3817                 vsi->bw_info.bw_ets_share_credits[i] =
3818                                 ets_sla_config.share_credits[i];
3819                 vsi->bw_info.bw_ets_credits[i] =
3820                                 rte_le_to_cpu_16(ets_sla_config.credits[i]);
3821                 /* 4 bits per TC, 4th bit is reserved */
3822                 vsi->bw_info.bw_ets_max[i] =
3823                         (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
3824                                   I40E_3_BIT_MASK);
3825                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
3826                             vsi->bw_info.bw_ets_share_credits[i]);
3827                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
3828                             vsi->bw_info.bw_ets_credits[i]);
3829                 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
3830                             vsi->bw_info.bw_ets_max[i]);
3831         }
3832
3833         return I40E_SUCCESS;
3834 }
3835
3836 /* Setup a VSI */
3837 struct i40e_vsi *
3838 i40e_vsi_setup(struct i40e_pf *pf,
3839                enum i40e_vsi_type type,
3840                struct i40e_vsi *uplink_vsi,
3841                uint16_t user_param)
3842 {
3843         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3844         struct i40e_vsi *vsi;
3845         struct i40e_mac_filter_info filter;
3846         int ret;
3847         struct i40e_vsi_context ctxt;
3848         struct ether_addr broadcast =
3849                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
3850
3851         if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
3852                 PMD_DRV_LOG(ERR, "VSI setup failed, "
3853                             "uplink VSI shouldn't be NULL");
3854                 return NULL;
3855         }
3856
3857         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
3858                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI's "
3859                             "uplink VSI should be NULL");
3860                 return NULL;
3861         }
3862
3863         /* If the uplink VSI didn't set up a VEB, create one first */
3864         if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
3865                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
3866
3867                 if (NULL == uplink_vsi->veb) {
3868                         PMD_DRV_LOG(ERR, "VEB setup failed");
3869                         return NULL;
3870                 }
3871         }
3872
3873         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
3874         if (!vsi) {
3875                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
3876                 return NULL;
3877         }
3878         TAILQ_INIT(&vsi->mac_list);
3879         vsi->type = type;
3880         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
3881         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
3882         vsi->parent_vsi = uplink_vsi;
3883         vsi->user_param = user_param;
3884         /* Allocate queues */
3885         switch (vsi->type) {
3886         case I40E_VSI_MAIN  :
3887                 vsi->nb_qps = pf->lan_nb_qps;
3888                 break;
3889         case I40E_VSI_SRIOV :
3890                 vsi->nb_qps = pf->vf_nb_qps;
3891                 break;
3892         case I40E_VSI_VMDQ2:
3893                 vsi->nb_qps = pf->vmdq_nb_qps;
3894                 break;
3895         case I40E_VSI_FDIR:
3896                 vsi->nb_qps = pf->fdir_nb_qps;
3897                 break;
3898         default:
3899                 goto fail_mem;
3900         }
3901         /*
3902          * The filter status descriptor is reported on rx queue 0,
3903          * while the tx queue for fdir filter programming has no
3904          * such constraint and can be any non-zero queue.
3905          * To keep it simple, the FDIR vsi uses queue pair 0.
3906          * To make sure queue pair 0 is used, queue allocation
3907          * must be done before this function is called.
3908          */
3909         if (type != I40E_VSI_FDIR) {
3910                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
3911                 if (ret < 0) {
3912                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
3913                                     vsi->seid, ret);
3914                         goto fail_mem;
3915                 }
3916                 vsi->base_queue = ret;
3917         } else
3918                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
3919
3920         /* VF has MSIX interrupt in VF range, don't allocate here */
3921         if (type == I40E_VSI_MAIN) {
3922                 ret = i40e_res_pool_alloc(&pf->msix_pool,
3923                                           RTE_MIN(vsi->nb_qps,
3924                                                   RTE_MAX_RXTX_INTR_VEC_ID));
3925                 if (ret < 0) {
3926                         PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
3927                                     vsi->seid, ret);
3928                         goto fail_queue_alloc;
3929                 }
3930                 vsi->msix_intr = ret;
3931                 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
3932         } else if (type != I40E_VSI_SRIOV) {
3933                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
3934                 if (ret < 0) {
3935                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
3936                         goto fail_queue_alloc;
3937                 }
3938                 vsi->msix_intr = ret;
3939                 vsi->nb_msix = 1;
3940         } else {
3941                 vsi->msix_intr = 0;
3942                 vsi->nb_msix = 0;
3943         }
3944
3945         /* Add VSI */
3946         if (type == I40E_VSI_MAIN) {
3947                 /* For main VSI, no need to add since it's the default one */
3948                 vsi->uplink_seid = pf->mac_seid;
3949                 vsi->seid = pf->main_vsi_seid;
3950                 /* Bind queues with specific MSIX interrupt */
3951                 /**
3952                  * At least two interrupts are needed: one for the misc
3953                  * cause, which is enabled from the OS side, and another
3954                  * for binding queue interrupts from the device side only.
3955                  */
3956
3957                 /* Get default VSI parameters from hardware */
3958                 memset(&ctxt, 0, sizeof(ctxt));
3959                 ctxt.seid = vsi->seid;
3960                 ctxt.pf_num = hw->pf_id;
3961                 ctxt.uplink_seid = vsi->uplink_seid;
3962                 ctxt.vf_num = 0;
3963                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
3964                 if (ret != I40E_SUCCESS) {
3965                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
3966                         goto fail_msix_alloc;
3967                 }
3968                 (void)rte_memcpy(&vsi->info, &ctxt.info,
3969                         sizeof(struct i40e_aqc_vsi_properties_data));
3970                 vsi->vsi_id = ctxt.vsi_number;
3971                 vsi->info.valid_sections = 0;
3972
3973                 /* Configure TC, enable TC0 only */
3974                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
3975                         I40E_SUCCESS) {
3976                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
3977                         goto fail_msix_alloc;
3978                 }
3979
3980                 /* TC, queue mapping */
3981                 memset(&ctxt, 0, sizeof(ctxt));
3982                 vsi->info.valid_sections |=
3983                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3984                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3985                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3986                 (void)rte_memcpy(&ctxt.info, &vsi->info,
3987                         sizeof(struct i40e_aqc_vsi_properties_data));
3988                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3989                                                 I40E_DEFAULT_TCMAP);
3990                 if (ret != I40E_SUCCESS) {
3991                         PMD_DRV_LOG(ERR, "Failed to configure "
3992                                     "TC queue mapping");
3993                         goto fail_msix_alloc;
3994                 }
3995                 ctxt.seid = vsi->seid;
3996                 ctxt.pf_num = hw->pf_id;
3997                 ctxt.uplink_seid = vsi->uplink_seid;
3998                 ctxt.vf_num = 0;
3999
4000                 /* Update VSI parameters */
4001                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4002                 if (ret != I40E_SUCCESS) {
4003                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
4004                         goto fail_msix_alloc;
4005                 }
4006
4007                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
4008                                                 sizeof(vsi->info.tc_mapping));
4009                 (void)rte_memcpy(&vsi->info.queue_mapping,
4010                                 &ctxt.info.queue_mapping,
4011                         sizeof(vsi->info.queue_mapping));
4012                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
4013                 vsi->info.valid_sections = 0;
4014
4015                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
4016                                 ETH_ADDR_LEN);
4017
4018                 /**
4019                  * Updating the default filter settings is necessary to
4020                  * prevent reception of tagged packets.
4021                  * Some old firmware configurations load a default macvlan
4022                  * filter which accepts both tagged and untagged packets.
4023                  * The update replaces it with a normal filter if needed.
4024                  * For NVM 4.2.2 or later the update is no longer needed.
4025                  * Firmware with correct configurations loads the default
4026                  * macvlan filter, which is expected and cannot be removed.
4027                  */
4028                 i40e_update_default_filter_setting(vsi);
4029                 i40e_config_qinq(hw, vsi);
4030         } else if (type == I40E_VSI_SRIOV) {
4031                 memset(&ctxt, 0, sizeof(ctxt));
4032                 /**
4033                  * For other VSIs, the uplink_seid equals the uplink VSI's
4034                  * uplink_seid since they share the same VEB.
4035                  */
4036                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4037                 ctxt.pf_num = hw->pf_id;
4038                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4039                 ctxt.uplink_seid = vsi->uplink_seid;
4040                 ctxt.connection_type = 0x1;
4041                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4042
4043                 /**
4044                  * Do not configure the switch ID to enable VEB switching
4045                  * via I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB. On Fortville, if
4046                  * the source MAC address of a packet sent from a VF is
4047                  * not listed in the VEB's MAC table, the VEB switches
4048                  * the packet back to the VF. Enable it once the HW
4049                  * issue is fixed.
4050                  */
4051
4052                 /* Configure port/vlan */
4053                 ctxt.info.valid_sections |=
4054                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4055                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4056                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4057                                                 I40E_DEFAULT_TCMAP);
4058                 if (ret != I40E_SUCCESS) {
4059                         PMD_DRV_LOG(ERR, "Failed to configure "
4060                                     "TC queue mapping");
4061                         goto fail_msix_alloc;
4062                 }
4063                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4064                 ctxt.info.valid_sections |=
4065                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4066                 /**
4067                  * Since the VSI is not created yet, only configure its
4068                  * parameters here; the VSI is added below.
4069                  */
4070
4071                 i40e_config_qinq(hw, vsi);
4072         } else if (type == I40E_VSI_VMDQ2) {
4073                 memset(&ctxt, 0, sizeof(ctxt));
4074                 /*
4075                  * For other VSIs, the uplink_seid equals the uplink VSI's
4076                  * uplink_seid since they share the same VEB.
4077                  */
4078                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4079                 ctxt.pf_num = hw->pf_id;
4080                 ctxt.vf_num = 0;
4081                 ctxt.uplink_seid = vsi->uplink_seid;
4082                 ctxt.connection_type = 0x1;
4083                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4084
4085                 ctxt.info.valid_sections |=
4086                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4087                 /* user_param carries the flag to enable loopback */
4088                 if (user_param) {
4089                         ctxt.info.switch_id =
4090                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
4091                         ctxt.info.switch_id |=
4092                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4093                 }
4094
4095                 /* Configure port/vlan */
4096                 ctxt.info.valid_sections |=
4097                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4098                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4099                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4100                                                 I40E_DEFAULT_TCMAP);
4101                 if (ret != I40E_SUCCESS) {
4102                         PMD_DRV_LOG(ERR, "Failed to configure "
4103                                         "TC queue mapping");
4104                         goto fail_msix_alloc;
4105                 }
4106                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4107                 ctxt.info.valid_sections |=
4108                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4109         } else if (type == I40E_VSI_FDIR) {
4110                 memset(&ctxt, 0, sizeof(ctxt));
4111                 vsi->uplink_seid = uplink_vsi->uplink_seid;
4112                 ctxt.pf_num = hw->pf_id;
4113                 ctxt.vf_num = 0;
4114                 ctxt.uplink_seid = vsi->uplink_seid;
4115                 ctxt.connection_type = 0x1;     /* regular data port */
4116                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4117                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4118                                                 I40E_DEFAULT_TCMAP);
4119                 if (ret != I40E_SUCCESS) {
4120                         PMD_DRV_LOG(ERR, "Failed to configure "
4121                                         "TC queue mapping.");
4122                         goto fail_msix_alloc;
4123                 }
4124                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4125                 ctxt.info.valid_sections |=
4126                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4127         } else {
4128                 PMD_DRV_LOG(ERR, "VSI: type not supported yet");
4129                 goto fail_msix_alloc;
4130         }
4131
4132         if (vsi->type != I40E_VSI_MAIN) {
4133                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
4134                 if (ret != I40E_SUCCESS) {
4135                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
4136                                     hw->aq.asq_last_status);
4137                         goto fail_msix_alloc;
4138                 }
4139                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
4140                 vsi->info.valid_sections = 0;
4141                 vsi->seid = ctxt.seid;
4142                 vsi->vsi_id = ctxt.vsi_number;
4143                 vsi->sib_vsi_list.vsi = vsi;
4144                 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
4145                                 &vsi->sib_vsi_list, list);
4146         }
4147
4148         /* MAC/VLAN configuration */
4149         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
4150         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4151
4152         ret = i40e_vsi_add_mac(vsi, &filter);
4153         if (ret != I40E_SUCCESS) {
4154                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4155                 goto fail_msix_alloc;
4156         }
4157
4158         /* Get VSI BW information */
4159         i40e_vsi_get_bw_config(vsi);
4160         return vsi;
4161 fail_msix_alloc:
4162         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4163 fail_queue_alloc:
4164         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4165 fail_mem:
4166         rte_free(vsi);
4167         return NULL;
4168 }
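
/*
 * Usage sketch for i40e_vsi_setup() (mirrors the calls made elsewhere
 * in this file; error handling trimmed for brevity):
 *
 *     struct i40e_vsi *main_vsi, *vmdq_vsi;
 *
 *     main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
 *     vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, main_vsi, 1);
 *
 * For non-MAIN types the uplink VSI must be non-NULL; for VMDQ2 the
 * user_param doubles as the loopback-enable flag.
 */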
4169
4170 /* Configure vlan stripping on or off */
4171 int
4172 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
4173 {
4174         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4175         struct i40e_vsi_context ctxt;
4176         uint8_t vlan_flags;
4177         int ret = I40E_SUCCESS;
4178
4179         /* Check if it has been already on or off */
4180         if (vsi->info.valid_sections &
4181                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
4182                 if (on) {
4183                         if ((vsi->info.port_vlan_flags &
4184                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
4185                                 return 0; /* already on */
4186                 } else {
4187                         if ((vsi->info.port_vlan_flags &
4188                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
4189                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
4190                                 return 0; /* already off */
4191                 }
4192         }
4193
4194         if (on)
4195                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4196         else
4197                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
4198         vsi->info.valid_sections =
4199                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4200         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
4201         vsi->info.port_vlan_flags |= vlan_flags;
4202         ctxt.seid = vsi->seid;
4203         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4204         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4205         if (ret)
4206                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
4207                             on ? "enable" : "disable");
4208
4209         return ret;
4210 }
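
/*
 * Example (illustrative): toggling stripping on the main VSI simply
 * rewrites the EMOD bits of port_vlan_flags and pushes a VSI context
 * update:
 *
 *     if (i40e_vsi_config_vlan_stripping(pf->main_vsi, TRUE))
 *             PMD_DRV_LOG(ERR, "Failed to enable VLAN stripping");
 */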
4211
4212 static int
4213 i40e_dev_init_vlan(struct rte_eth_dev *dev)
4214 {
4215         struct rte_eth_dev_data *data = dev->data;
4216         int ret;
4217
4218         /* Apply vlan offload setting */
4219         i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
4220
4221         /* Apply double-vlan setting, not implemented yet */
4222
4223         /* Apply pvid setting */
4224         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
4225                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
4226         if (ret)
4227                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
4228
4229         return ret;
4230 }
4231
4232 static int
4233 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
4234 {
4235         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4236
4237         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
4238 }
4239
4240 static int
4241 i40e_update_flow_control(struct i40e_hw *hw)
4242 {
4243 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
4244         struct i40e_link_status link_status;
4245         uint32_t rxfc = 0, txfc = 0, reg;
4246         uint8_t an_info;
4247         int ret;
4248
4249         memset(&link_status, 0, sizeof(link_status));
4250         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
4251         if (ret != I40E_SUCCESS) {
4252                 PMD_DRV_LOG(ERR, "Failed to get link status information");
4253                 goto write_reg; /* Disable flow control */
4254         }
4255
4256         an_info = hw->phy.link_info.an_info;
4257         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
4258                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
4259                 ret = I40E_ERR_NOT_READY;
4260                 goto write_reg; /* Disable flow control */
4261         }
4262         /**
4263          * If link auto negotiation is enabled, flow control needs to
4264          * be configured according to it
4265          */
4266         switch (an_info & I40E_LINK_PAUSE_RXTX) {
4267         case I40E_LINK_PAUSE_RXTX:
4268                 rxfc = 1;
4269                 txfc = 1;
4270                 hw->fc.current_mode = I40E_FC_FULL;
4271                 break;
4272         case I40E_AQ_LINK_PAUSE_RX:
4273                 rxfc = 1;
4274                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
4275                 break;
4276         case I40E_AQ_LINK_PAUSE_TX:
4277                 txfc = 1;
4278                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
4279                 break;
4280         default:
4281                 hw->fc.current_mode = I40E_FC_NONE;
4282                 break;
4283         }
4284
4285 write_reg:
4286         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
4287                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
4288         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4289         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
4290         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
4291         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
4292
4293         return ret;
4294 }
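
/*
 * Summary of the negotiated-pause mapping applied above:
 *
 *     RX pause | TX pause | fc.current_mode
 *     ---------+----------+-----------------
 *        yes   |   yes    | I40E_FC_FULL
 *        yes   |   no     | I40E_FC_RX_PAUSE
 *        no    |   yes    | I40E_FC_TX_PAUSE
 *        no    |   no     | I40E_FC_NONE
 *
 * All failure paths still reach write_reg with rxfc = txfc = 0, so
 * flow control is effectively disabled whenever link info is unusable.
 */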
4295
4296 /* PF setup */
4297 static int
4298 i40e_pf_setup(struct i40e_pf *pf)
4299 {
4300         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4301         struct i40e_filter_control_settings settings;
4302         struct i40e_vsi *vsi;
4303         int ret;
4304
4305         /* Clear all stats counters */
4306         pf->offset_loaded = FALSE;
4307         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
4308         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
4309
4310         ret = i40e_pf_get_switch_config(pf);
4311         if (ret != I40E_SUCCESS) {
4312                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
4313                 return ret;
4314         }
4315         if (pf->flags & I40E_FLAG_FDIR) {
4316                 /* make sure queues are allocated first; FDIR uses queue pair 0 */
4317                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
4318                 if (ret != I40E_FDIR_QUEUE_ID) {
4319                         PMD_DRV_LOG(ERR, "queue allocation failed for FDIR:"
4320                                     " ret=%d", ret);
4321                         pf->flags &= ~I40E_FLAG_FDIR;
4322                 }
4323         }
4324         /*  main VSI setup */
4325         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
4326         if (!vsi) {
4327                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
4328                 return I40E_ERR_NOT_READY;
4329         }
4330         pf->main_vsi = vsi;
4331
4332         /* Configure filter control */
4333         memset(&settings, 0, sizeof(settings));
4334         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
4335                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
4336         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
4337                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
4338         else {
4339                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
4340                                                 hw->func_caps.rss_table_size);
4341                 return I40E_ERR_PARAM;
4342         }
4343         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
4344                         "size: %u", hw->func_caps.rss_table_size);
4345         pf->hash_lut_size = hw->func_caps.rss_table_size;
4346
4347         /* Enable ethtype and macvlan filters */
4348         settings.enable_ethtype = TRUE;
4349         settings.enable_macvlan = TRUE;
4350         ret = i40e_set_filter_control(hw, &settings);
4351         if (ret)
4352                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
4353                                                                 ret);
4354
4355         /* Update flow control according to the auto negotiation */
4356         i40e_update_flow_control(hw);
4357
4358         return I40E_SUCCESS;
4359 }
4360
4361 int
4362 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4363 {
4364         uint32_t reg;
4365         uint16_t j;
4366
4367         /**
4368          * Set or clear TX Queue Disable flags,
4369          * which is required by hardware.
4370          */
4371         i40e_pre_tx_queue_cfg(hw, q_idx, on);
4372         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
4373
4374         /* Wait until the request is finished */
4375         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4376                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4377                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4378                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4379                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
4380                                                         & 0x1))) {
4381                         break;
4382                 }
4383         }
4384         if (on) {
4385                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4386                         return I40E_SUCCESS; /* already on, skip next steps */
4387
4388                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
4389                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4390         } else {
4391                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4392                         return I40E_SUCCESS; /* already off, skip next steps */
4393                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4394         }
4395         /* Write the register */
4396         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
4397         /* Check the result */
4398         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4399                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4400                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4401                 if (on) {
4402                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4403                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
4404                                 break;
4405                 } else {
4406                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4407                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4408                                 break;
4409                 }
4410         }
4411         /* Check if it is timeout */
4412         if (j >= I40E_CHK_Q_ENA_COUNT) {
4413                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
4414                             (on ? "enable" : "disable"), q_idx);
4415                 return I40E_ERR_TIMEOUT;
4416         }
4417
4418         return I40E_SUCCESS;
4419 }
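
/*
 * The QENA_REQ/QENA_STAT handshake used above, step by step:
 *
 *     1. wait until REQ == STAT (no request pending);
 *     2. if STAT already matches 'on', return early;
 *     3. write REQ = on (for enable, also reset QTX_HEAD to 0);
 *     4. poll until STAT follows REQ, or time out after roughly
 *        I40E_CHK_Q_ENA_COUNT * I40E_CHK_Q_ENA_INTERVAL_US = 1 second.
 */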
4420
4421 /* Switch on or off the tx queues */
4422 static int
4423 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
4424 {
4425         struct rte_eth_dev_data *dev_data = pf->dev_data;
4426         struct i40e_tx_queue *txq;
4427         struct rte_eth_dev *dev = pf->adapter->eth_dev;
4428         uint16_t i;
4429         int ret;
4430
4431         for (i = 0; i < dev_data->nb_tx_queues; i++) {
4432                 txq = dev_data->tx_queues[i];
4433                 /* Skip the queue if it is not configured, or if it is
4434                  * marked deferred-start when starting all queues */
4435                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
4436                         continue;
4437                 if (on)
4438                         ret = i40e_dev_tx_queue_start(dev, i);
4439                 else
4440                         ret = i40e_dev_tx_queue_stop(dev, i);
4441                 if (ret != I40E_SUCCESS)
4442                         return ret;
4443         }
4444
4445         return I40E_SUCCESS;
4446 }
4447
4448 int
4449 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4450 {
4451         uint32_t reg;
4452         uint16_t j;
4453
4454         /* Wait until the request is finished */
4455         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4456                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4457                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4458                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4459                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
4460                         break;
4461         }
4462
4463         if (on) {
4464                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
4465                         return I40E_SUCCESS; /* Already on, skip next steps */
4466                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4467         } else {
4468                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4469                         return I40E_SUCCESS; /* Already off, skip next steps */
4470                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4471         }
4472
4473         /* Write the register */
4474         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
4475         /* Check the result */
4476         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4477                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4478                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4479                 if (on) {
4480                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4481                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
4482                                 break;
4483                 } else {
4484                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4485                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4486                                 break;
4487                 }
4488         }
4489
4490         /* Check if it is timeout */
4491         if (j >= I40E_CHK_Q_ENA_COUNT) {
4492                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
4493                             (on ? "enable" : "disable"), q_idx);
4494                 return I40E_ERR_TIMEOUT;
4495         }
4496
4497         return I40E_SUCCESS;
4498 }
4499 /* Switch on or off the rx queues */
4500 static int
4501 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
4502 {
4503         struct rte_eth_dev_data *dev_data = pf->dev_data;
4504         struct i40e_rx_queue *rxq;
4505         struct rte_eth_dev *dev = pf->adapter->eth_dev;
4506         uint16_t i;
4507         int ret;
4508
4509         for (i = 0; i < dev_data->nb_rx_queues; i++) {
4510                 rxq = dev_data->rx_queues[i];
4511                 /* Skip the queue if it is not configured, or if it is
4512                  * marked deferred-start when starting all queues */
4513                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
4514                         continue;
4515                 if (on)
4516                         ret = i40e_dev_rx_queue_start(dev, i);
4517                 else
4518                         ret = i40e_dev_rx_queue_stop(dev, i);
4519                 if (ret != I40E_SUCCESS)
4520                         return ret;
4521         }
4522
4523         return I40E_SUCCESS;
4524 }
4525
4526 /* Switch on or off all the rx/tx queues */
4527 int
4528 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
4529 {
4530         int ret;
4531
4532         if (on) {
4533                 /* enable rx queues before enabling tx queues */
4534                 ret = i40e_dev_switch_rx_queues(pf, on);
4535                 if (ret) {
4536                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
4537                         return ret;
4538                 }
4539                 ret = i40e_dev_switch_tx_queues(pf, on);
4540         } else {
4541                 /* Stop tx queues before stopping rx queues */
4542                 ret = i40e_dev_switch_tx_queues(pf, on);
4543                 if (ret) {
4544                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
4545                         return ret;
4546                 }
4547                 ret = i40e_dev_switch_rx_queues(pf, on);
4548         }
4549
4550         return ret;
4551 }
4552
4553 /* Initialize VSI for TX */
4554 static int
4555 i40e_dev_tx_init(struct i40e_pf *pf)
4556 {
4557         struct rte_eth_dev_data *data = pf->dev_data;
4558         uint16_t i;
4559         uint32_t ret = I40E_SUCCESS;
4560         struct i40e_tx_queue *txq;
4561
4562         for (i = 0; i < data->nb_tx_queues; i++) {
4563                 txq = data->tx_queues[i];
4564                 if (!txq || !txq->q_set)
4565                         continue;
4566                 ret = i40e_tx_queue_init(txq);
4567                 if (ret != I40E_SUCCESS)
4568                         break;
4569         }
4570         if (ret == I40E_SUCCESS)
4571                 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
4572                                      ->eth_dev);
4573
4574         return ret;
4575 }
4576
4577 /* Initialize VSI for RX */
4578 static int
4579 i40e_dev_rx_init(struct i40e_pf *pf)
4580 {
4581         struct rte_eth_dev_data *data = pf->dev_data;
4582         int ret = I40E_SUCCESS;
4583         uint16_t i;
4584         struct i40e_rx_queue *rxq;
4585
4586         i40e_pf_config_mq_rx(pf);
4587         for (i = 0; i < data->nb_rx_queues; i++) {
4588                 rxq = data->rx_queues[i];
4589                 if (!rxq || !rxq->q_set)
4590                         continue;
4591
4592                 ret = i40e_rx_queue_init(rxq);
4593                 if (ret != I40E_SUCCESS) {
4594                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
4595                                     "initialization");
4596                         break;
4597                 }
4598         }
4599         if (ret == I40E_SUCCESS)
4600                 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
4601                                      ->eth_dev);
4602
4603         return ret;
4604 }
4605
4606 static int
4607 i40e_dev_rxtx_init(struct i40e_pf *pf)
4608 {
4609         int err;
4610
4611         err = i40e_dev_tx_init(pf);
4612         if (err) {
4613                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
4614                 return err;
4615         }
4616         err = i40e_dev_rx_init(pf);
4617         if (err) {
4618                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
4619                 return err;
4620         }
4621
4622         return err;
4623 }
4624
4625 static int
4626 i40e_vmdq_setup(struct rte_eth_dev *dev)
4627 {
4628         struct rte_eth_conf *conf = &dev->data->dev_conf;
4629         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4630         int i, err, conf_vsis, j, loop;
4631         struct i40e_vsi *vsi;
4632         struct i40e_vmdq_info *vmdq_info;
4633         struct rte_eth_vmdq_rx_conf *vmdq_conf;
4634         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4635
4636         /*
4637          * Disable the interrupt to avoid messages from VFs. It also
4638          * avoids a race condition in VSI creation/destruction.
4639          */
4640         i40e_pf_disable_irq0(hw);
4641
4642         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
4643                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
4644                 return -ENOTSUP;
4645         }
4646
4647         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
4648         if (conf_vsis > pf->max_nb_vmdq_vsi) {
4649                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max supported: %u",
4650                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
4651                         pf->max_nb_vmdq_vsi);
4652                 return -ENOTSUP;
4653         }
4654
4655         if (pf->vmdq != NULL) {
4656                 PMD_INIT_LOG(INFO, "VMDQ already configured");
4657                 return 0;
4658         }
4659
4660         pf->vmdq = rte_zmalloc("vmdq_info_struct",
4661                                 sizeof(*vmdq_info) * conf_vsis, 0);
4662
4663         if (pf->vmdq == NULL) {
4664                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
4665                 return -ENOMEM;
4666         }
4667
4668         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
4669
4670         /* Create VMDQ VSI */
4671         for (i = 0; i < conf_vsis; i++) {
4672                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
4673                                 vmdq_conf->enable_loop_back);
4674                 if (vsi == NULL) {
4675                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
4676                         err = -1;
4677                         goto err_vsi_setup;
4678                 }
4679                 vmdq_info = &pf->vmdq[i];
4680                 vmdq_info->pf = pf;
4681                 vmdq_info->vsi = vsi;
4682         }
4683         pf->nb_cfg_vmdq_vsi = conf_vsis;
4684
4685         /* Configure Vlan */
4686         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
4687         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
4688                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
4689                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
4690                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
4691                                         vmdq_conf->pool_map[i].vlan_id, j);
4692
4693                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
4694                                                 vmdq_conf->pool_map[i].vlan_id);
4695                                 if (err) {
4696                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
4697                                         err = -1;
4698                                         goto err_vsi_setup;
4699                                 }
4700                         }
4701                 }
4702         }
4703
4704         i40e_pf_enable_irq0(hw);
4705
4706         return 0;
4707
4708 err_vsi_setup:
4709         for (i = 0; i < conf_vsis; i++)
4710                 if (pf->vmdq[i].vsi == NULL)
4711                         break;
4712                 else
4713                         i40e_vsi_release(pf->vmdq[i].vsi);
4714
4715         rte_free(pf->vmdq);
4716         pf->vmdq = NULL;
4717         i40e_pf_enable_irq0(hw);
4718         return err;
4719 }
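
/*
 * Example of the pool_map expansion done above (illustrative values):
 * with pool_map[0].vlan_id = 100 and pool_map[0].pools = 0x5 (bits 0
 * and 2 set), VLAN 100 is added to VMDQ VSIs 0 and 2 through
 * i40e_vsi_add_vlan().
 */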
4720
4721 static void
4722 i40e_stat_update_32(struct i40e_hw *hw,
4723                    uint32_t reg,
4724                    bool offset_loaded,
4725                    uint64_t *offset,
4726                    uint64_t *stat)
4727 {
4728         uint64_t new_data;
4729
4730         new_data = (uint64_t)I40E_READ_REG(hw, reg);
4731         if (!offset_loaded)
4732                 *offset = new_data;
4733
4734         if (new_data >= *offset)
4735                 *stat = (uint64_t)(new_data - *offset);
4736         else
4737                 *stat = (uint64_t)((new_data +
4738                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
4739 }
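
/*
 * Rollover example for the 32-bit counter math above: with
 * *offset = 0xFFFFFFF0 and a fresh register read of 0x00000010, the
 * counter has wrapped, so the delta becomes
 * (0x00000010 + 2^32) - 0xFFFFFFF0 = 0x20, i.e. 32 events.
 */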
4740
4741 static void
4742 i40e_stat_update_48(struct i40e_hw *hw,
4743                    uint32_t hireg,
4744                    uint32_t loreg,
4745                    bool offset_loaded,
4746                    uint64_t *offset,
4747                    uint64_t *stat)
4748 {
4749         uint64_t new_data;
4750
4751         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
4752         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
4753                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
4754
4755         if (!offset_loaded)
4756                 *offset = new_data;
4757
4758         if (new_data >= *offset)
4759                 *stat = new_data - *offset;
4760         else
4761                 *stat = (uint64_t)((new_data +
4762                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
4763
4764         *stat &= I40E_48_BIT_MASK;
4765 }
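
/*
 * The 48-bit variant composes the counter from two registers: the low
 * 32 bits from loreg and the next 16 bits from hireg, e.g. reads of
 * hireg = 0x0001 and loreg = 0x00000002 yield 0x000100000002. Wrap
 * handling mirrors the 32-bit case, modulo 2^48.
 */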
4766
4767 /* Disable IRQ0 */
4768 void
4769 i40e_pf_disable_irq0(struct i40e_hw *hw)
4770 {
4771         /* Disable all interrupt types */
4772         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
4773         I40E_WRITE_FLUSH(hw);
4774 }
4775
4776 /* Enable IRQ0 */
4777 void
4778 i40e_pf_enable_irq0(struct i40e_hw *hw)
4779 {
4780         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
4781                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
4782                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4783                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
4784         I40E_WRITE_FLUSH(hw);
4785 }
4786
4787 static void
4788 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
4789 {
4790         /* read pending request and disable first */
4791         i40e_pf_disable_irq0(hw);
4792         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
4793         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
4794                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
4795
4796         if (no_queue)
4797                 /* Link no queues with irq0 */
4798                 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
4799                                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
4800 }
4801
4802 static void
4803 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
4804 {
4805         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4806         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4807         int i;
4808         uint16_t abs_vf_id;
4809         uint32_t index, offset, val;
4810
4811         if (!pf->vfs)
4812                 return;
4813         /**
4814          * Try to find which VF triggered a reset; use the absolute VF
4815          * id for access since the register is a global one.
4816          */
4817         for (i = 0; i < pf->vf_num; i++) {
4818                 abs_vf_id = hw->func_caps.vf_base_id + i;
4819                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
4820                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
4821                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
4822                 /* VFR event occurred */
4823                 if (val & (0x1 << offset)) {
4824                         int ret;
4825
4826                         /* Clear the event first */
4827                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
4828                                                         (0x1 << offset));
4829                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
4830                         /**
4831                          * Only notify that a VF reset event occurred;
4832                          * don't trigger another SW reset.
4833                          */
4834                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
4835                         if (ret != I40E_SUCCESS)
4836                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
4837                 }
4838         }
4839 }
4840
4841 static void
4842 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
4843 {
4844         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4845         struct i40e_arq_event_info info;
4846         uint16_t pending, opcode;
4847         int ret;
4848
4849         info.buf_len = I40E_AQ_BUF_SZ;
4850         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
4851         if (!info.msg_buf) {
4852                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
4853                 return;
4854         }
4855
4856         pending = 1;
4857         while (pending) {
4858                 ret = i40e_clean_arq_element(hw, &info, &pending);
4859
4860                 if (ret != I40E_SUCCESS) {
4861                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
4862                                     "aq_err: %u", hw->aq.asq_last_status);
4863                         break;
4864                 }
4865                 opcode = rte_le_to_cpu_16(info.desc.opcode);
4866
4867                 switch (opcode) {
4868                 case i40e_aqc_opc_send_msg_to_pf:
4869                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
4870                         i40e_pf_host_handle_vf_msg(dev,
4871                                         rte_le_to_cpu_16(info.desc.retval),
4872                                         rte_le_to_cpu_32(info.desc.cookie_high),
4873                                         rte_le_to_cpu_32(info.desc.cookie_low),
4874                                         info.msg_buf,
4875                                         info.msg_len);
4876                         break;
4877                 default:
4878                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
4879                                     opcode);
4880                         break;
4881                 }
4882         }
4883         rte_free(info.msg_buf);
4884 }
4885
4886 /*
4887  * This interrupt handler is registered as the alarm callback to handle
4888  * the LSC interrupt after a definite amount of time, in order to wait
4889  * for the NIC to reach a stable state. Currently i40e waits 1 sec for
4890  * the link up interrupt; no wait is needed for link down.
4891  */
4892 static void
4893 i40e_dev_interrupt_delayed_handler(void *param)
4894 {
4895         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4896         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4897         uint32_t icr0;
4898
4899         /* read interrupt causes again */
4900         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4901
4902 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4903         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4904                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
4905         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4906                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
4907         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4908                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
4909         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4910                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
4911         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4912                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
4913                                                                 "state");
4914         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4915                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4916         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4917                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4918 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4919
4920         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4921                 PMD_DRV_LOG(INFO, "INT: VF reset detected");
4922                 i40e_dev_handle_vfr_event(dev);
4923         }
4924         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4925                 PMD_DRV_LOG(INFO, "INT: ADMINQ event");
4926                 i40e_dev_handle_aq_msg(dev);
4927         }
4928
4929         /* handle the link up interrupt in an alarm callback */
4930         i40e_dev_link_update(dev, 0);
4931         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
4932
4933         i40e_pf_enable_irq0(hw);
4934         rte_intr_enable(&(dev->pci_dev->intr_handle));
4935 }
4936
4937 /**
4938  * Interrupt handler triggered by the NIC for handling
4939  * specific interrupts.
4940  *
4941  * @param handle
4942  *  Pointer to interrupt handle.
4943  * @param param
4944  *  The address of the parameter (struct rte_eth_dev *) registered before.
4945  *
4946  * @return
4947  *  void
4948  */
4949 static void
4950 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
4951                            void *param)
4952 {
4953         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4954         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4955         uint32_t icr0;
4956
4957         /* Disable interrupt */
4958         i40e_pf_disable_irq0(hw);
4959
4960         /* read out interrupt causes */
4961         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4962
4963         /* No interrupt event indicated */
4964         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
4965                 PMD_DRV_LOG(INFO, "No interrupt event");
4966                 goto done;
4967         }
4968 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4969         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4970                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
4971         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4972                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
4973         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4974                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
4975         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4976                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
4977         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4978                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
4979         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4980                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4981         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4982                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4983 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4984
4985         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4986                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4987                 i40e_dev_handle_vfr_event(dev);
4988         }
4989         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4990                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4991                 i40e_dev_handle_aq_msg(dev);
4992         }
4993
4994         /* Link Status Change interrupt */
4995         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
4996 #define I40E_US_PER_SECOND 1000000
4997                 struct rte_eth_link link;
4998
4999                 PMD_DRV_LOG(INFO, "ICR0: link status changed");
5000                 memset(&link, 0, sizeof(link));
5001                 rte_i40e_dev_atomic_read_link_status(dev, &link);
5002                 i40e_dev_link_update(dev, 0);
5003
5004                 /*
5005                  * For link up interrupt, it needs to wait 1 second to let the
5006                  * hardware be a stable state. Otherwise several consecutive
5007                  * interrupts can be observed.
5008                  * For link down interrupt, no need to wait.
5009                  */
5010                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
5011                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
5012                         return;
5013                 else
5014                         _rte_eth_dev_callback_process(dev,
5015                                 RTE_ETH_EVENT_INTR_LSC);
5016         }
5017
5018 done:
5019         /* Enable interrupt */
5020         i40e_pf_enable_irq0(hw);
5021         rte_intr_enable(&(dev->pci_dev->intr_handle));
5022 }
5023
5024 static int
5025 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
5026                          struct i40e_macvlan_filter *filter,
5027                          int total)
5028 {
5029         int ele_num, ele_buff_size;
5030         int num, actual_num, i;
5031         uint16_t flags;
5032         int ret = I40E_SUCCESS;
5033         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5034         struct i40e_aqc_add_macvlan_element_data *req_list;
5035
5036         if (filter == NULL || total == 0)
5037                 return I40E_ERR_PARAM;
5038         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5039         ele_buff_size = hw->aq.asq_buf_size;
5040
5041         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5042         if (req_list == NULL) {
5043                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5044                 return I40E_ERR_NO_MEMORY;
5045         }
5046
5047         num = 0;
5048         do {
5049                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5050                 memset(req_list, 0, ele_buff_size);
5051
5052                 for (i = 0; i < actual_num; i++) {
5053                         (void)rte_memcpy(req_list[i].mac_addr,
5054                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5055                         req_list[i].vlan_tag =
5056                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5057
5058                         switch (filter[num + i].filter_type) {
5059                         case RTE_MAC_PERFECT_MATCH:
5060                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5061                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5062                                 break;
5063                         case RTE_MACVLAN_PERFECT_MATCH:
5064                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5065                                 break;
5066                         case RTE_MAC_HASH_MATCH:
5067                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5068                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5069                                 break;
5070                         case RTE_MACVLAN_HASH_MATCH:
5071                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5072                                 break;
5073                         default:
5074                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
5075                                 ret = I40E_ERR_PARAM;
5076                                 goto DONE;
5077                         }
5078
5079                         req_list[i].queue_number = 0;
5080
5081                         req_list[i].flags = rte_cpu_to_le_16(flags);
5082                 }
5083
5084                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
5085                                                 actual_num, NULL);
5086                 if (ret != I40E_SUCCESS) {
5087                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
5088                         goto DONE;
5089                 }
5090                 num += actual_num;
5091         } while (num < total);
5092
5093 DONE:
5094         rte_free(req_list);
5095         return ret;
5096 }
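
/*
 * Chunking example (assuming a typical 4 KB AdminQ buffer and a
 * 16-byte add-macvlan element): ele_num = 4096 / 16 = 256, so adding
 * 300 filters issues two AQ commands, one carrying 256 elements and
 * one carrying the remaining 44.
 */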
5097
5098 static int
5099 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
5100                             struct i40e_macvlan_filter *filter,
5101                             int total)
5102 {
5103         int ele_num, ele_buff_size;
5104         int num, actual_num, i;
5105         uint16_t flags;
5106         int ret = I40E_SUCCESS;
5107         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5108         struct i40e_aqc_remove_macvlan_element_data *req_list;
5109
5110         if (filter == NULL || total == 0)
5111                 return I40E_ERR_PARAM;
5112
5113         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5114         ele_buff_size = hw->aq.asq_buf_size;
5115
5116         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
5117         if (req_list == NULL) {
5118                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
5119                 return I40E_ERR_NO_MEMORY;
5120         }
5121
5122         num = 0;
5123         do {
5124                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5125                 memset(req_list, 0, ele_buff_size);
5126
5127                 for (i = 0; i < actual_num; i++) {
5128                         (void)rte_memcpy(req_list[i].mac_addr,
5129                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
5130                         req_list[i].vlan_tag =
5131                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
5132
5133                         switch (filter[num + i].filter_type) {
5134                         case RTE_MAC_PERFECT_MATCH:
5135                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5136                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5137                                 break;
5138                         case RTE_MACVLAN_PERFECT_MATCH:
5139                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
5140                                 break;
5141                         case RTE_MAC_HASH_MATCH:
5142                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
5143                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5144                                 break;
5145                         case RTE_MACVLAN_HASH_MATCH:
5146                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
5147                                 break;
5148                         default:
5149                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
5150                                 ret = I40E_ERR_PARAM;
5151                                 goto DONE;
5152                         }
5153                         req_list[i].flags = rte_cpu_to_le_16(flags);
5154                 }
5155
5156                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
5157                                                 actual_num, NULL);
5158                 if (ret != I40E_SUCCESS) {
5159                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
5160                         goto DONE;
5161                 }
5162                 num += actual_num;
5163         } while (num < total);
5164
5165 DONE:
5166         rte_free(req_list);
5167         return ret;
5168 }
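
/*
 * Note on the add/remove helpers above (illustrative numbers only):
 * requests are batched so that each admin queue command fits within
 * hw->aq.asq_buf_size. Assuming, say, a 4096-byte ASQ buffer and 16-byte
 * elements, ele_num is 256, so removing 600 filters would issue three AQ
 * commands carrying 256, 256 and 88 elements respectively.
 */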
5169
5170 /* Find out specific MAC filter */
5171 static struct i40e_mac_filter *
5172 i40e_find_mac_filter(struct i40e_vsi *vsi,
5173                          struct ether_addr *macaddr)
5174 {
5175         struct i40e_mac_filter *f;
5176
5177         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5178                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
5179                         return f;
5180         }
5181
5182         return NULL;
5183 }
5184
5185 static bool
5186 i40e_find_vlan_filter(struct i40e_vsi *vsi,
5187                          uint16_t vlan_id)
5188 {
5189         uint32_t vid_idx, vid_bit;
5190
5191         if (vlan_id > ETH_VLAN_ID_MAX)
5192                 return 0;
5193
5194         vid_idx = I40E_VFTA_IDX(vlan_id);
5195         vid_bit = I40E_VFTA_BIT(vlan_id);
5196
5197         if (vsi->vfta[vid_idx] & vid_bit)
5198                 return 1;
5199         else
5200                 return 0;
5201 }
5202
5203 static void
5204 i40e_set_vlan_filter(struct i40e_vsi *vsi,
5205                          uint16_t vlan_id, bool on)
5206 {
5207         uint32_t vid_idx, vid_bit;
5208
5209         if (vlan_id > ETH_VLAN_ID_MAX)
5210                 return;
5211
5212         vid_idx = I40E_VFTA_IDX(vlan_id);
5213         vid_bit = I40E_VFTA_BIT(vlan_id);
5214
5215         if (on)
5216                 vsi->vfta[vid_idx] |= vid_bit;
5217         else
5218                 vsi->vfta[vid_idx] &= ~vid_bit;
5219 }
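
/*
 * Sketch of the VFTA indexing used above. The actual I40E_VFTA_IDX/BIT
 * macros live in i40e_ethdev.h; the split below is an assumption for
 * illustration. The 4096 possible VLAN IDs are packed into 32-bit words:
 *
 *     vid_idx = vlan_id >> 5;            // vlan_id / 32: word index
 *     vid_bit = 1 << (vlan_id & 0x1F);   // bit inside that word
 *
 * e.g. VLAN 100 maps to word 3, bit 4, since 100 == 3 * 32 + 4.
 */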
5220
5221 /**
5222  * Find all vlan options for a specific mac addr,
5223  * return with the actual vlans found.
5224  */
5225 static inline int
5226 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
5227                            struct i40e_macvlan_filter *mv_f,
5228                            int num, struct ether_addr *addr)
5229 {
5230         int i;
5231         uint32_t j, k;
5232
5233         /**
5234          * i40e_find_vlan_filter is deliberately not used here, to reduce
5235          * the loop time, although the code looks more complex.
5236          */
5237         if (num < vsi->vlan_num)
5238                 return I40E_ERR_PARAM;
5239
5240         i = 0;
5241         for (j = 0; j < I40E_VFTA_SIZE; j++) {
5242                 if (vsi->vfta[j]) {
5243                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
5244                                 if (vsi->vfta[j] & (1 << k)) {
5245                                         if (i > num - 1) {
5246                                                 PMD_DRV_LOG(ERR, "vlan number "
5247                                                             "doesn't match");
5248                                                 return I40E_ERR_PARAM;
5249                                         }
5250                                         (void)rte_memcpy(&mv_f[i].macaddr,
5251                                                         addr, ETH_ADDR_LEN);
5252                                         mv_f[i].vlan_id =
5253                                                 j * I40E_UINT32_BIT_SIZE + k;
5254                                         i++;
5255                                 }
5256                         }
5257                 }
5258         }
5259         return I40E_SUCCESS;
5260 }
5261
5262 static inline int
5263 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
5264                            struct i40e_macvlan_filter *mv_f,
5265                            int num,
5266                            uint16_t vlan)
5267 {
5268         int i = 0;
5269         struct i40e_mac_filter *f;
5270
5271         if (num < vsi->mac_num)
5272                 return I40E_ERR_PARAM;
5273
5274         TAILQ_FOREACH(f, &vsi->mac_list, next) {
5275                 if (i > num - 1) {
5276                         PMD_DRV_LOG(ERR, "buffer number doesn't match");
5277                         return I40E_ERR_PARAM;
5278                 }
5279                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5280                                 ETH_ADDR_LEN);
5281                 mv_f[i].vlan_id = vlan;
5282                 mv_f[i].filter_type = f->mac_info.filter_type;
5283                 i++;
5284         }
5285
5286         return I40E_SUCCESS;
5287 }
5288
5289 static int
5290 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
5291 {
5292         int i, num;
5293         struct i40e_mac_filter *f;
5294         struct i40e_macvlan_filter *mv_f;
5295         int ret = I40E_SUCCESS;
5296
5297         if (vsi == NULL || vsi->mac_num == 0)
5298                 return I40E_ERR_PARAM;
5299
5300         /* Case that no vlan is set */
5301         if (vsi->vlan_num == 0)
5302                 num = vsi->mac_num;
5303         else
5304                 num = vsi->mac_num * vsi->vlan_num;
5305
5306         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
5307         if (mv_f == NULL) {
5308                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5309                 return I40E_ERR_NO_MEMORY;
5310         }
5311
5312         i = 0;
5313         if (vsi->vlan_num == 0) {
5314                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5315                         (void)rte_memcpy(&mv_f[i].macaddr,
5316                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
5317                         mv_f[i].vlan_id = 0;
5318                         i++;
5319                 }
5320         } else {
5321                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
5322                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
5323                                         vsi->vlan_num, &f->mac_info.mac_addr);
5324                         if (ret != I40E_SUCCESS)
5325                                 goto DONE;
5326                         i += vsi->vlan_num;
5327                 }
5328         }
5329
5330         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
5331 DONE:
5332         rte_free(mv_f);
5333
5334         return ret;
5335 }
5336
5337 int
5338 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5339 {
5340         struct i40e_macvlan_filter *mv_f;
5341         int mac_num;
5342         int ret = I40E_SUCCESS;
5343
5344         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
5345                 return I40E_ERR_PARAM;
5346
5347         /* If it's already set, just return */
5348         if (i40e_find_vlan_filter(vsi, vlan))
5349                 return I40E_SUCCESS;
5350
5351         mac_num = vsi->mac_num;
5352
5353         if (mac_num == 0) {
5354                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5355                 return I40E_ERR_PARAM;
5356         }
5357
5358         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5359
5360         if (mv_f == NULL) {
5361                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5362                 return I40E_ERR_NO_MEMORY;
5363         }
5364
5365         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5366
5367         if (ret != I40E_SUCCESS)
5368                 goto DONE;
5369
5370         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5371
5372         if (ret != I40E_SUCCESS)
5373                 goto DONE;
5374
5375         i40e_set_vlan_filter(vsi, vlan, 1);
5376
5377         vsi->vlan_num++;
5378         ret = I40E_SUCCESS;
5379 DONE:
5380         rte_free(mv_f);
5381         return ret;
5382 }
5383
5384 int
5385 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5386 {
5387         struct i40e_macvlan_filter *mv_f;
5388         int mac_num;
5389         int ret = I40E_SUCCESS;
5390
5391         /**
5392          * Vlan 0 is the generic filter for untagged packets
5393          * and can't be removed.
5394          */
5395         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
5396                 return I40E_ERR_PARAM;
5397
5398         /* If the filter can't be found, return an error */
5399         if (!i40e_find_vlan_filter(vsi, vlan))
5400                 return I40E_ERR_PARAM;
5401
5402         mac_num = vsi->mac_num;
5403
5404         if (mac_num == 0) {
5405                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5406                 return I40E_ERR_PARAM;
5407         }
5408
5409         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5410
5411         if (mv_f == NULL) {
5412                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5413                 return I40E_ERR_NO_MEMORY;
5414         }
5415
5416         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5417
5418         if (ret != I40E_SUCCESS)
5419                 goto DONE;
5420
5421         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
5422
5423         if (ret != I40E_SUCCESS)
5424                 goto DONE;
5425
5426         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
5427         if (vsi->vlan_num == 1) {
5428                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
5429                 if (ret != I40E_SUCCESS)
5430                         goto DONE;
5431
5432                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5433                 if (ret != I40E_SUCCESS)
5434                         goto DONE;
5435         }
5436
5437         i40e_set_vlan_filter(vsi, vlan, 0);
5438
5439         vsi->vlan_num--;
5440         ret = I40E_SUCCESS;
5441 DONE:
5442         rte_free(mv_f);
5443         return ret;
5444 }
5445
5446 int
5447 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
5448 {
5449         struct i40e_mac_filter *f;
5450         struct i40e_macvlan_filter *mv_f;
5451         int i, vlan_num = 0;
5452         int ret = I40E_SUCCESS;
5453
5454         /* If the MAC address filter already exists, just return success */
5455         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
5456         if (f != NULL)
5457                 return I40E_SUCCESS;
5458         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
5459                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
5460
5461                 /**
5462                  * If vlan_num is 0, that's the first time to add mac,
5463                  * set mask for vlan_id 0.
5464                  */
5465                 if (vsi->vlan_num == 0) {
5466                         i40e_set_vlan_filter(vsi, 0, 1);
5467                         vsi->vlan_num = 1;
5468                 }
5469                 vlan_num = vsi->vlan_num;
5470         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
5471                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
5472                 vlan_num = 1;
5473
5474         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
5475         if (mv_f == NULL) {
5476                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5477                 return I40E_ERR_NO_MEMORY;
5478         }
5479
5480         for (i = 0; i < vlan_num; i++) {
5481                 mv_f[i].filter_type = mac_filter->filter_type;
5482                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
5483                                 ETH_ADDR_LEN);
5484         }
5485
5486         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5487                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
5488                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
5489                                         &mac_filter->mac_addr);
5490                 if (ret != I40E_SUCCESS)
5491                         goto DONE;
5492         }
5493
5494         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
5495         if (ret != I40E_SUCCESS)
5496                 goto DONE;
5497
5498         /* Add the mac addr into the mac list */
5499         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5500         if (f == NULL) {
5501                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5502                 ret = I40E_ERR_NO_MEMORY;
5503                 goto DONE;
5504         }
5505         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
5506                         ETH_ADDR_LEN);
5507         f->mac_info.filter_type = mac_filter->filter_type;
5508         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5509         vsi->mac_num++;
5510
5511         ret = I40E_SUCCESS;
5512 DONE:
5513         rte_free(mv_f);
5514
5515         return ret;
5516 }
5517
5518 int
5519 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
5520 {
5521         struct i40e_mac_filter *f;
5522         struct i40e_macvlan_filter *mv_f;
5523         int i, vlan_num;
5524         enum rte_mac_filter_type filter_type;
5525         int ret = I40E_SUCCESS;
5526
5527         /* Can't find it, return an error */
5528         f = i40e_find_mac_filter(vsi, addr);
5529         if (f == NULL)
5530                 return I40E_ERR_PARAM;
5531
5532         vlan_num = vsi->vlan_num;
5533         filter_type = f->mac_info.filter_type;
5534         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5535                 filter_type == RTE_MACVLAN_HASH_MATCH) {
5536                 if (vlan_num == 0) {
5537                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
5538                         return I40E_ERR_PARAM;
5539                 }
5540         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
5541                         filter_type == RTE_MAC_HASH_MATCH)
5542                 vlan_num = 1;
5543
5544         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
5545         if (mv_f == NULL) {
5546                 PMD_DRV_LOG(ERR, "failed to allocate memory");
5547                 return I40E_ERR_NO_MEMORY;
5548         }
5549
5550         for (i = 0; i < vlan_num; i++) {
5551                 mv_f[i].filter_type = filter_type;
5552                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5553                                 ETH_ADDR_LEN);
5554         }
5555         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
5556                         filter_type == RTE_MACVLAN_HASH_MATCH) {
5557                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
5558                 if (ret != I40E_SUCCESS)
5559                         goto DONE;
5560         }
5561
5562         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
5563         if (ret != I40E_SUCCESS)
5564                 goto DONE;
5565
5566         /* Remove the mac addr from the mac list */
5567         TAILQ_REMOVE(&vsi->mac_list, f, next);
5568         rte_free(f);
5569         vsi->mac_num--;
5570
5571         ret = I40E_SUCCESS;
5572 DONE:
5573         rte_free(mv_f);
5574         return ret;
5575 }
5576
5577 /* Configure hash enable flags for RSS */
5578 uint64_t
5579 i40e_config_hena(uint64_t flags)
5580 {
5581         uint64_t hena = 0;
5582
5583         if (!flags)
5584                 return hena;
5585
5586         if (flags & ETH_RSS_FRAG_IPV4)
5587                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
5588         if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
5589                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
5590         if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
5591                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5592         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
5593                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
5594         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
5595                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
5596         if (flags & ETH_RSS_FRAG_IPV6)
5597                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
5598         if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
5599                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
5600         if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
5601                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
5602         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
5603                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
5604         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
5605                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
5606         if (flags & ETH_RSS_L2_PAYLOAD)
5607                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
5608
5609         return hena;
5610 }
5611
5612 /* Parse the hash enable flags */
5613 uint64_t
5614 i40e_parse_hena(uint64_t flags)
5615 {
5616         uint64_t rss_hf = 0;
5617
5618         if (!flags)
5619                 return rss_hf;
5620         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
5621                 rss_hf |= ETH_RSS_FRAG_IPV4;
5622         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
5623                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
5624         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
5625                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
5626         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
5627                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
5628         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
5629                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
5630         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
5631                 rss_hf |= ETH_RSS_FRAG_IPV6;
5632         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
5633                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
5634         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
5635                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
5636         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
5637                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
5638         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
5639                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
5640         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
5641                 rss_hf |= ETH_RSS_L2_PAYLOAD;
5642
5643         return rss_hf;
5644 }
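
/*
 * Illustrative note: i40e_config_hena() and i40e_parse_hena() are inverse
 * mappings between the ETH_RSS_* flag space and the hardware PCTYPE bit
 * space, so for any mask built from the flags handled above:
 *
 *     uint64_t rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_FRAG_IPV6;
 *     uint64_t hena = i40e_config_hena(rss_hf);
 *     // i40e_parse_hena(hena) == rss_hf
 */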
5645
5646 /* Disable RSS */
5647 static void
5648 i40e_pf_disable_rss(struct i40e_pf *pf)
5649 {
5650         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5651         uint64_t hena;
5652
5653         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5654         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5655         hena &= ~I40E_RSS_HENA_ALL;
5656         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
5657         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
5658         I40E_WRITE_FLUSH(hw);
5659 }
5660
5661 static int
5662 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
5663 {
5664         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
5665         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5666         int ret = 0;
5667
5668         if (!key || key_len == 0) {
5669                 PMD_DRV_LOG(DEBUG, "No key to be configured");
5670                 return 0;
5671         } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
5672                 sizeof(uint32_t)) {
5673                 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
5674                 return -EINVAL;
5675         }
5676
5677         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
5678                 struct i40e_aqc_get_set_rss_key_data *key_dw =
5679                         (struct i40e_aqc_get_set_rss_key_data *)key;
5680
5681                 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
5682                 if (ret)
5683                         PMD_INIT_LOG(ERR, "Failed to configure RSS key "
5684                                      "via AQ");
5685         } else {
5686                 uint32_t *hash_key = (uint32_t *)key;
5687                 uint16_t i;
5688
5689                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5690                         I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
5691                 I40E_WRITE_FLUSH(hw);
5692         }
5693
5694         return ret;
5695 }
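
/*
 * Usage sketch for i40e_set_rss_key(): the only accepted non-zero key
 * length is (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes, one
 * dword per HKEY register:
 *
 *     uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
 *     // ... fill key with the desired hash key ...
 *     ret = i40e_set_rss_key(vsi, key, sizeof(key));
 *
 * A NULL key or a zero length leaves the current key untouched.
 */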
5696
5697 static int
5698 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
5699 {
5700         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
5701         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5702         int ret;
5703
5704         if (!key || !key_len)
5705                 return -EINVAL;
5706
5707         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
5708                 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
5709                         (struct i40e_aqc_get_set_rss_key_data *)key);
5710                 if (ret) {
5711                         PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
5712                         return ret;
5713                 }
5714         } else {
5715                 uint32_t *key_dw = (uint32_t *)key;
5716                 uint16_t i;
5717
5718                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5719                         key_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
5720         }
5721         *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
5722
5723         return 0;
5724 }
5725
5726 static int
5727 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
5728 {
5729         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5730         uint64_t rss_hf;
5731         uint64_t hena;
5732         int ret;
5733
5734         ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
5735                                rss_conf->rss_key_len);
5736         if (ret)
5737                 return ret;
5738
5739         rss_hf = rss_conf->rss_hf;
5740         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5741         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5742         hena &= ~I40E_RSS_HENA_ALL;
5743         hena |= i40e_config_hena(rss_hf);
5744         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
5745         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
5746         I40E_WRITE_FLUSH(hw);
5747
5748         return 0;
5749 }
5750
5751 static int
5752 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
5753                          struct rte_eth_rss_conf *rss_conf)
5754 {
5755         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5756         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5757         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
5758         uint64_t hena;
5759
5760         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5761         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5762         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
5763                 if (rss_hf != 0) /* Enable RSS */
5764                         return -EINVAL;
5765                 return 0; /* Nothing to do */
5766         }
5767         /* RSS enabled */
5768         if (rss_hf == 0) /* Disable RSS */
5769                 return -EINVAL;
5770
5771         return i40e_hw_rss_hash_set(pf, rss_conf);
5772 }
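
/*
 * Caller-side sketch (hypothetical port_id): this callback cannot toggle
 * RSS on or off; it can only change the hash types while RSS is already
 * enabled, e.g. through the generic ethdev API:
 *
 *     struct rte_eth_rss_conf conf = {
 *             .rss_key = NULL,   // keep the current key
 *             .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
 *     };
 *     rte_eth_dev_rss_hash_update(port_id, &conf);
 */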
5773
5774 static int
5775 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
5776                            struct rte_eth_rss_conf *rss_conf)
5777 {
5778         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5779         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5780         uint64_t hena;
5781
5782         i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
5783                          &rss_conf->rss_key_len);
5784
5785         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
5786         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
5787         rss_conf->rss_hf = i40e_parse_hena(hena);
5788
5789         return 0;
5790 }
5791
5792 static int
5793 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
5794 {
5795         switch (filter_type) {
5796         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
5797                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
5798                 break;
5799         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
5800                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
5801                 break;
5802         case RTE_TUNNEL_FILTER_IMAC_TENID:
5803                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
5804                 break;
5805         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
5806                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
5807                 break;
5808         case ETH_TUNNEL_FILTER_IMAC:
5809                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
5810                 break;
5811         default:
5812                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
5813                 return -EINVAL;
5814         }
5815
5816         return 0;
5817 }
5818
5819 static int
5820 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
5821                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
5822                         uint8_t add)
5823 {
5824         uint16_t ip_type;
5825         uint8_t tun_type = 0;
5826         int val, ret = 0;
5827         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5828         struct i40e_vsi *vsi = pf->main_vsi;
5829         struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
5830         struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;
5831
5832         cld_filter = rte_zmalloc("tunnel_filter",
5833                 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
5834                 0);
5835
5836         if (cld_filter == NULL) {
5837                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5838                 return -EINVAL;
5839         }
5840         pfilter = cld_filter;
5841
5842         (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
5843                         sizeof(struct ether_addr));
5844         (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
5845                         sizeof(struct ether_addr));
5846
5847         pfilter->inner_vlan = tunnel_filter->inner_vlan;
5848         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
5849                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
5850                 (void)rte_memcpy(&pfilter->ipaddr.v4.data,
5851                                 &tunnel_filter->ip_addr,
5852                                 sizeof(pfilter->ipaddr.v4.data));
5853         } else {
5854                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
5855                 (void)rte_memcpy(&pfilter->ipaddr.v6.data,
5856                                 &tunnel_filter->ip_addr,
5857                                 sizeof(pfilter->ipaddr.v6.data));
5858         }
5859
5860         /* check tunnel type */
5861         switch (tunnel_filter->tunnel_type) {
5862         case RTE_TUNNEL_TYPE_VXLAN:
5863                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
5864                 break;
5865         case RTE_TUNNEL_TYPE_NVGRE:
5866                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
5867                 break;
5868         default:
5869                 /* Other tunnel types are not supported. */
5870                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
5871                 rte_free(cld_filter);
5872                 return -EINVAL;
5873         }
5874
5875         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
5876                                                 &pfilter->flags);
5877         if (val < 0) {
5878                 rte_free(cld_filter);
5879                 return -EINVAL;
5880         }
5881
5882         pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
5883                 (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
5884         pfilter->tenant_id = tunnel_filter->tenant_id;
5885         pfilter->queue_number = tunnel_filter->queue_id;
5886
5887         if (add)
5888                 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
5889         else
5890                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
5891                                                 cld_filter, 1);
5892
5893         rte_free(cld_filter);
5894         return ret;
5895 }
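
/*
 * Caller-side sketch (hypothetical port_id and values): tunnel filters
 * reach i40e_dev_tunnel_filter_set() through the generic filter API, e.g.:
 *
 *     struct ether_addr imac = { .addr_bytes = {0x02, 0, 0, 0, 0, 1} };
 *     struct rte_eth_tunnel_filter_conf conf = {
 *             .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *             .filter_type = ETH_TUNNEL_FILTER_IMAC,
 *             .inner_mac = &imac,     // inner MAC to match
 *             .tenant_id = 1000,      // VNI to match
 *             .queue_id = 3,          // RX queue to steer to
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
 *                             RTE_ETH_FILTER_ADD, &conf);
 */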
5896
5897 static int
5898 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
5899 {
5900         uint8_t i;
5901
5902         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5903                 if (pf->vxlan_ports[i] == port)
5904                         return i;
5905         }
5906
5907         return -1;
5908 }
5909
5910 static int
5911 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
5912 {
5913         int  idx, ret;
5914         uint8_t filter_idx;
5915         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5916
5917         idx = i40e_get_vxlan_port_idx(pf, port);
5918
5919         /* Check if port already exists */
5920         if (idx >= 0) {
5921                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
5922                 return -EINVAL;
5923         }
5924
5925         /* Now check if there is space to add the new port */
5926         idx = i40e_get_vxlan_port_idx(pf, 0);
5927         if (idx < 0) {
5928                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
5929                         "not adding port %d", port);
5930                 return -ENOSPC;
5931         }
5932
5933         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
5934                                         &filter_idx, NULL);
5935         if (ret < 0) {
5936                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
5937                 return -1;
5938         }
5939
5940         PMD_DRV_LOG(INFO, "Added port %d with AQ command, filter index %d",
5941                         port, filter_idx);
5942
5943         /* New port: add it and mark its index in the bitmap */
5944         pf->vxlan_ports[idx] = port;
5945         pf->vxlan_bitmap |= (1 << idx);
5946
5947         if (!(pf->flags & I40E_FLAG_VXLAN))
5948                 pf->flags |= I40E_FLAG_VXLAN;
5949
5950         return 0;
5951 }
5952
5953 static int
5954 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
5955 {
5956         int idx;
5957         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5958
5959         if (!(pf->flags & I40E_FLAG_VXLAN)) {
5960                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
5961                 return -EINVAL;
5962         }
5963
5964         idx = i40e_get_vxlan_port_idx(pf, port);
5965
5966         if (idx < 0) {
5967                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
5968                 return -EINVAL;
5969         }
5970
5971         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
5972                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
5973                 return -1;
5974         }
5975
5976         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, filter index %d",
5977                         port, idx);
5978
5979         pf->vxlan_ports[idx] = 0;
5980         pf->vxlan_bitmap &= ~(1 << idx);
5981
5982         if (!pf->vxlan_bitmap)
5983                 pf->flags &= ~I40E_FLAG_VXLAN;
5984
5985         return 0;
5986 }
5987
5988 /* Add UDP tunneling port */
5989 static int
5990 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
5991                         struct rte_eth_udp_tunnel *udp_tunnel)
5992 {
5993         int ret = 0;
5994         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5995
5996         if (udp_tunnel == NULL)
5997                 return -EINVAL;
5998
5999         switch (udp_tunnel->prot_type) {
6000         case RTE_TUNNEL_TYPE_VXLAN:
6001                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
6002                 break;
6003
6004         case RTE_TUNNEL_TYPE_GENEVE:
6005         case RTE_TUNNEL_TYPE_TEREDO:
6006                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
6007                 ret = -1;
6008                 break;
6009
6010         default:
6011                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6012                 ret = -1;
6013                 break;
6014         }
6015
6016         return ret;
6017 }
6018
6019 /* Remove UDP tunneling port */
6020 static int
6021 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
6022                         struct rte_eth_udp_tunnel *udp_tunnel)
6023 {
6024         int ret = 0;
6025         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6026
6027         if (udp_tunnel == NULL)
6028                 return -EINVAL;
6029
6030         switch (udp_tunnel->prot_type) {
6031         case RTE_TUNNEL_TYPE_VXLAN:
6032                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
6033                 break;
6034         case RTE_TUNNEL_TYPE_GENEVE:
6035         case RTE_TUNNEL_TYPE_TEREDO:
6036                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
6037                 ret = -1;
6038                 break;
6039         default:
6040                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
6041                 ret = -1;
6042                 break;
6043         }
6044
6045         return ret;
6046 }
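
/*
 * Caller-side sketch (hypothetical port_id; ethdev API name as assumed
 * for this release):
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *             .udp_port = 4789,       // IANA-assigned VXLAN port
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *     rte_eth_dev_udp_tunnel_add(port_id, &tunnel);
 *
 * At most I40E_MAX_PF_UDP_OFFLOAD_PORTS VXLAN ports can be offloaded per
 * PF at a time.
 */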
6047
6048 /* Calculate the maximum number of contiguous PF queues that are configured */
6049 static int
6050 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
6051 {
6052         struct rte_eth_dev_data *data = pf->dev_data;
6053         int i, num;
6054         struct i40e_rx_queue *rxq;
6055
6056         num = 0;
6057         for (i = 0; i < pf->lan_nb_qps; i++) {
6058                 rxq = data->rx_queues[i];
6059                 if (rxq && rxq->q_set)
6060                         num++;
6061                 else
6062                         break;
6063         }
6064
6065         return num;
6066 }
6067
6068 /* Configure RSS */
6069 static int
6070 i40e_pf_config_rss(struct i40e_pf *pf)
6071 {
6072         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6073         struct rte_eth_rss_conf rss_conf;
6074         uint32_t i, lut = 0;
6075         uint16_t j, num;
6076
6077         /*
6078          * If both VMDQ and RSS are enabled, not all PF queues are configured.
6079          * It's necessary to calculate the actual number of PF queues in use.
6080          */
6081         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
6082                 num = i40e_pf_calc_configured_queues_num(pf);
6083         else
6084                 num = pf->dev_data->nb_rx_queues;
6085
6086         num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
6087         PMD_INIT_LOG(INFO, "At most %u contiguous PF queues are configured",
6088                         num);
6089
6090         if (num == 0) {
6091                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
6092                 return -ENOTSUP;
6093         }
6094
6095         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
6096                 if (j == num)
6097                         j = 0;
6098                 lut = (lut << 8) | (j & ((0x1 <<
6099                         hw->func_caps.rss_table_entry_width) - 1));
6100                 if ((i & 3) == 3)
6101                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
6102         }
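        /*
         * Note (illustrative): each 32-bit HLUT register packs four 8-bit
         * queue indices and is written on every fourth iteration. With
         * num == 4, for instance, the first write above stores 0x00010203
         * (queue 0 in the most significant byte).
         */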
6103
6104         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
6105         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
6106                 i40e_pf_disable_rss(pf);
6107                 return 0;
6108         }
6109         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
6110                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
6111                 /* Random default keys */
6112                 static uint32_t rss_key_default[] = {0x6b793944,
6113                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
6114                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
6115                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
6116
6117                 rss_conf.rss_key = (uint8_t *)rss_key_default;
6118                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6119                                                         sizeof(uint32_t);
6120         }
6121
6122         return i40e_hw_rss_hash_set(pf, &rss_conf);
6123 }
6124
6125 static int
6126 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
6127                                struct rte_eth_tunnel_filter_conf *filter)
6128 {
6129         if (pf == NULL || filter == NULL) {
6130                 PMD_DRV_LOG(ERR, "Invalid parameter");
6131                 return -EINVAL;
6132         }
6133
6134         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
6135                 PMD_DRV_LOG(ERR, "Invalid queue ID");
6136                 return -EINVAL;
6137         }
6138
6139         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
6140                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
6141                 return -EINVAL;
6142         }
6143
6144         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
6145                 (is_zero_ether_addr(filter->outer_mac))) {
6146                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
6147                 return -EINVAL;
6148         }
6149
6150         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
6151                 (is_zero_ether_addr(filter->inner_mac))) {
6152                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
6153                 return -EINVAL;
6154         }
6155
6156         return 0;
6157 }
6158
6159 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
6160 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
6161 static int
6162 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
6163 {
6164         uint32_t val, reg;
6165         int ret = -EINVAL;
6166
6167         val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
6168         PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
6169
6170         if (len == 3) {
6171                 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
6172         } else if (len == 4) {
6173                 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
6174         } else {
6175                 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
6176                 return ret;
6177         }
6178
6179         if (reg != val) {
6180                 ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
6181                                                    reg, NULL);
6182                 if (ret != 0)
6183                         return ret;
6184         } else {
6185                 ret = 0;
6186         }
6187         PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
6188                     I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
6189
6190         return ret;
6191 }
6192
6193 static int
6194 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
6195 {
6196         int ret = -EINVAL;
6197
6198         if (!hw || !cfg)
6199                 return -EINVAL;
6200
6201         switch (cfg->cfg_type) {
6202         case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
6203                 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
6204                 break;
6205         default:
6206                 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
6207                 break;
6208         }
6209
6210         return ret;
6211 }
6212
6213 static int
6214 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
6215                                enum rte_filter_op filter_op,
6216                                void *arg)
6217 {
6218         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6219         int ret = I40E_ERR_PARAM;
6220
6221         switch (filter_op) {
6222         case RTE_ETH_FILTER_SET:
6223                 ret = i40e_dev_global_config_set(hw,
6224                         (struct rte_eth_global_cfg *)arg);
6225                 break;
6226         default:
6227                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6228                 break;
6229         }
6230
6231         return ret;
6232 }
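
/*
 * Caller-side sketch (hypothetical port_id): the GRE key length is set
 * through the generic filter API, assuming RTE_ETH_FILTER_NONE routes to
 * the global config handler above:
 *
 *     struct rte_eth_global_cfg cfg = {
 *             .cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN,
 *             .cfg.gre_key_len = 4,   // match on a 4-byte GRE key
 *     };
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NONE,
 *                             RTE_ETH_FILTER_SET, &cfg);
 *
 * This is a device-global setting shared by all ports on the NIC.
 */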
6233
6234 static int
6235 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
6236                           enum rte_filter_op filter_op,
6237                           void *arg)
6238 {
6239         struct rte_eth_tunnel_filter_conf *filter;
6240         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6241         int ret = I40E_SUCCESS;
6242
6243         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
6244
6245         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
6246                 return I40E_ERR_PARAM;
6247
6248         switch (filter_op) {
6249         case RTE_ETH_FILTER_NOP:
6250                 if (!(pf->flags & I40E_FLAG_VXLAN))
6251                         ret = I40E_NOT_SUPPORTED;
6252                 break;
6253         case RTE_ETH_FILTER_ADD:
6254                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
6255                 break;
6256         case RTE_ETH_FILTER_DELETE:
6257                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
6258                 break;
6259         default:
6260                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6261                 ret = I40E_ERR_PARAM;
6262                 break;
6263         }
6264
6265         return ret;
6266 }
6267
6268 static int
6269 i40e_pf_config_mq_rx(struct i40e_pf *pf)
6270 {
6271         int ret = 0;
6272         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
6273
6274         /* RSS setup */
6275         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
6276                 ret = i40e_pf_config_rss(pf);
6277         else
6278                 i40e_pf_disable_rss(pf);
6279
6280         return ret;
6281 }
6282
6283 /* Get the symmetric hash enable configurations per port */
6284 static void
6285 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
6286 {
6287         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
6288
6289         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
6290 }
6291
6292 /* Set the symmetric hash enable configurations per port */
6293 static void
6294 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
6295 {
6296         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
6297
6298         if (enable > 0) {
6299                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
6300                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6301                                                         "been enabled");
6302                         return;
6303                 }
6304                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6305         } else {
6306                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
6307                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
6308                                                         "been disabled");
6309                         return;
6310                 }
6311                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6312         }
6313         I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
6314         I40E_WRITE_FLUSH(hw);
6315 }
6316
6317 /*
6318  * Get global configurations of hash function type and symmetric hash enable
6319  * per flow type (pctype). Note that global configuration means it affects all
6320  * the ports on the same NIC.
6321  */
6322 static int
6323 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
6324                                    struct rte_eth_hash_global_conf *g_cfg)
6325 {
6326         uint32_t reg, mask = I40E_FLOW_TYPES;
6327         uint16_t i;
6328         enum i40e_filter_pctype pctype;
6329
6330         memset(g_cfg, 0, sizeof(*g_cfg));
6331         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
6332         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
6333                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
6334         else
6335                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
6336         PMD_DRV_LOG(DEBUG, "Hash function is %s",
6337                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
6338
6339         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
6340                 if (!(mask & (1UL << i)))
6341                         continue;
6342                 mask &= ~(1UL << i);
6343                 /* A set bit indicates the corresponding flow type is supported */
6344                 g_cfg->valid_bit_mask[0] |= (1UL << i);
6345                 pctype = i40e_flowtype_to_pctype(i);
6346                 reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
6347                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
6348                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
6349         }
6350
6351         return 0;
6352 }
6353
6354 static int
6355 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
6356 {
6357         uint32_t i;
6358         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
6359
6360         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
6361                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
6362                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
6363                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
6364                                                 g_cfg->hash_func);
6365                 return -EINVAL;
6366         }
6367
6368         /*
6369          * As i40e supports fewer than 32 flow types, only the first 32 bits
6370          * need to be checked.
6371          */
6372         mask0 = g_cfg->valid_bit_mask[0];
6373         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
6374                 if (i == 0) {
6375                         /* Check if any unsupported flow type configured */
6376                         if ((mask0 | i40e_mask) ^ i40e_mask)
6377                                 goto mask_err;
6378                 } else {
6379                         if (g_cfg->valid_bit_mask[i])
6380                                 goto mask_err;
6381                 }
6382         }
6383
6384         return 0;
6385
6386 mask_err:
6387         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
6388
6389         return -EINVAL;
6390 }
6391
6392 /*
6393  * Set global configurations of hash function type and symmetric hash enable
6394  * per flow type (pctype). Note that modifying the global configuration affects
6395  * all the ports on the same NIC.
6396  */
6397 static int
6398 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
6399                                    struct rte_eth_hash_global_conf *g_cfg)
6400 {
6401         int ret;
6402         uint16_t i;
6403         uint32_t reg;
6404         uint32_t mask0 = g_cfg->valid_bit_mask[0];
6405         enum i40e_filter_pctype pctype;
6406
6407         /* Check the input parameters */
6408         ret = i40e_hash_global_config_check(g_cfg);
6409         if (ret < 0)
6410                 return ret;
6411
6412         for (i = 0; mask0 && i < UINT32_BIT; i++) {
6413                 if (!(mask0 & (1UL << i)))
6414                         continue;
6415                 mask0 &= ~(1UL << i);
6416                 pctype = i40e_flowtype_to_pctype(i);
6417                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
6418                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
6419                 I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
6420         }
6421
6422         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
6423         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
6424                 /* Toeplitz */
6425                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
6426                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
6427                                                                 "Toeplitz");
6428                         goto out;
6429                 }
6430                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
6431         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
6432                 /* Simple XOR */
6433                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
6434                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
6435                                                         "Simple XOR");
6436                         goto out;
6437                 }
6438                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
6439         } else
6440                 /* Use the default, and keep it as it is */
6441                 goto out;
6442
6443         I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
6444
6445 out:
6446         I40E_WRITE_FLUSH(hw);
6447
6448         return 0;
6449 }
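
/*
 * Caller-side sketch (hypothetical port_id): the global hash function and
 * the per-flow-type symmetric hash bits are assumed to be reachable via
 * the RTE_ETH_HASH_FILTER_GLOBAL_CONFIG info type:
 *
 *     struct rte_eth_hash_filter_info info = {
 *             .info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG,
 *     };
 *     info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
 *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                             RTE_ETH_FILTER_SET, &info);
 */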
6450
6451 /**
6452  * Valid input sets for hash and flow director filters per PCTYPE
6453  */
6454 static uint64_t
6455 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
6456                 enum rte_filter_type filter)
6457 {
6458         uint64_t valid;
6459
6460         static const uint64_t valid_hash_inset_table[] = {
6461                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6462                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6463                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6464                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
6465                         I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
6466                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6467                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6468                         I40E_INSET_FLEX_PAYLOAD,
6469                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6470                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6471                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6472                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6473                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6474                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6475                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6476                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6477                         I40E_INSET_FLEX_PAYLOAD,
6478                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6479                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6480                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6481                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6482                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6483                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6484                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6485                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6486                         I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
6487                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6488                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6489                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6490                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6491                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6492                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6493                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6494                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6495                         I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6496                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6497                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6498                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6499                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
6500                         I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
6501                         I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
6502                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6503                         I40E_INSET_FLEX_PAYLOAD,
6504                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6505                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6506                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6507                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6508                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6509                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
6510                         I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
6511                         I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
6512                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6513                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6514                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6515                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6516                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6517                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6518                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6519                         I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
6520                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6521                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6522                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6523                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6524                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6525                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6526                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6527                         I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
6528                         I40E_INSET_FLEX_PAYLOAD,
6529                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6530                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6531                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6532                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6533                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6534                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6535                         I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
6536                         I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
6537                         I40E_INSET_FLEX_PAYLOAD,
6538                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6539                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6540                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6541                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
6542                         I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
6543                         I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
6544                         I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
6545                         I40E_INSET_FLEX_PAYLOAD,
6546                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6547                         I40E_INSET_DMAC | I40E_INSET_SMAC |
6548                         I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
6549                         I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
6550                         I40E_INSET_FLEX_PAYLOAD,
6551         };
6552
6553         /**
6554          * Flow director supports only fields defined in
6555          * union rte_eth_fdir_flow.
6556          */
6557         static const uint64_t valid_fdir_inset_table[] = {
6558                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6559                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6560                 I40E_INSET_FLEX_PAYLOAD,
6561                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6562                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6563                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6564                 I40E_INSET_FLEX_PAYLOAD,
6565                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6566                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6567                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6568                 I40E_INSET_FLEX_PAYLOAD,
6569                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6570                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6571                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6572                 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6573                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6574                 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6575                 I40E_INSET_FLEX_PAYLOAD,
6576                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6577                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6578                 I40E_INSET_FLEX_PAYLOAD,
6579                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6580                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6581                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6582                 I40E_INSET_FLEX_PAYLOAD,
6583                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6584                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6585                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6586                 I40E_INSET_FLEX_PAYLOAD,
6587                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6588                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6589                 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6590                 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
6591                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6592                 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6593                 I40E_INSET_FLEX_PAYLOAD,
6594                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6595                 I40E_INSET_LAST_ETHER_TYPE | I40E_INSET_FLEX_PAYLOAD,
6596         };
6597
6598         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
6599                 return 0;
6600         if (filter == RTE_ETH_FILTER_HASH)
6601                 valid = valid_hash_inset_table[pctype];
6602         else
6603                 valid = valid_fdir_inset_table[pctype];
6604
6605         return valid;
6606 }
6607
6608 /**
6609  * Validate if the input set is allowed for a specific PCTYPE
6610  */
6611 static int
6612 i40e_validate_input_set(enum i40e_filter_pctype pctype,
6613                 enum rte_filter_type filter, uint64_t inset)
6614 {
6615         uint64_t valid;
6616
6617         valid = i40e_get_valid_input_set(pctype, filter);
6618         if (inset & (~valid))
6619                 return -EINVAL;
6620
6621         return 0;
6622 }
6623
6624 /* Default input set field combinations per pctype */
6625 static uint64_t
6626 i40e_get_default_input_set(uint16_t pctype)
6627 {
6628         static const uint64_t default_inset_table[] = {
6629                 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
6630                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
6631                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
6632                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6633                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6634                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
6635                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6636                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6637                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
6638                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
6639                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6640                         I40E_INSET_SCTP_VT,
6641                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
6642                         I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
6643                 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
6644                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
6645                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
6646                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6647                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6648                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
6649                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6650                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
6651                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
6652                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
6653                         I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
6654                         I40E_INSET_SCTP_VT,
6655                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
6656                         I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
6657                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
6658                         I40E_INSET_LAST_ETHER_TYPE,
6659         };
6660
6661         if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
6662                 return 0;
6663
6664         return default_inset_table[pctype];
6665 }
6666
6667 /**
6668  * Parse the input set fields into logical bit masks
6669  */
6670 static int
6671 i40e_parse_input_set(uint64_t *inset,
6672                      enum i40e_filter_pctype pctype,
6673                      enum rte_eth_input_set_field *field,
6674                      uint16_t size)
6675 {
6676         uint16_t i, j;
6677         int ret = -EINVAL;
6678
6679         static const struct {
6680                 enum rte_eth_input_set_field field;
6681                 uint64_t inset;
6682         } inset_convert_table[] = {
6683                 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
6684                 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
6685                 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
6686                 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
6687                 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
6688                 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
6689                 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
6690                 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
6691                 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
6692                 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
6693                 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
6694                 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
6695                 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
6696                 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
6697                         I40E_INSET_IPV6_NEXT_HDR},
6698                 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
6699                 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
6700                 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
6701                 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
6702                 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
6703                 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
6704                 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
6705                         I40E_INSET_SCTP_VT},
6706                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
6707                         I40E_INSET_TUNNEL_DMAC},
6708                 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
6709                         I40E_INSET_VLAN_TUNNEL},
6710                 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
6711                         I40E_INSET_TUNNEL_ID},
6712                 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
6713                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
6714                         I40E_INSET_FLEX_PAYLOAD_W1},
6715                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
6716                         I40E_INSET_FLEX_PAYLOAD_W2},
6717                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
6718                         I40E_INSET_FLEX_PAYLOAD_W3},
6719                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
6720                         I40E_INSET_FLEX_PAYLOAD_W4},
6721                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
6722                         I40E_INSET_FLEX_PAYLOAD_W5},
6723                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
6724                         I40E_INSET_FLEX_PAYLOAD_W6},
6725                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
6726                         I40E_INSET_FLEX_PAYLOAD_W7},
6727                 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
6728                         I40E_INSET_FLEX_PAYLOAD_W8},
6729         };
6730
6731         if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
6732                 return ret;
6733
6734         /* Only one item is allowed for default or none */
6735         if (size == 1) {
6736                 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
6737                         *inset = i40e_get_default_input_set(pctype);
6738                         return 0;
6739                 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
6740                         *inset = I40E_INSET_NONE;
6741                         return 0;
6742                 }
6743         }
6744
6745         for (i = 0, *inset = 0; i < size; i++) {
6746                 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
6747                         if (field[i] == inset_convert_table[j].field) {
6748                                 *inset |= inset_convert_table[j].inset;
6749                                 break;
6750                         }
6751                 }
6752
6753                 /* It contains unsupported input set, return immediately */
6754                 if (j == RTE_DIM(inset_convert_table))
6755                         return ret;
6756         }
6757
6758         return 0;
6759 }
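
/*
 * Illustrative sketch (not part of the driver): an application selecting
 * source/destination IPv4 addresses plus UDP ports would pass a field
 * array that i40e_parse_input_set() folds into the inset bitmap:
 *
 *   enum rte_eth_input_set_field fields[] = {
 *           RTE_ETH_INPUT_SET_L3_SRC_IP4,
 *           RTE_ETH_INPUT_SET_L3_DST_IP4,
 *           RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT,
 *           RTE_ETH_INPUT_SET_L4_UDP_DST_PORT,
 *   };
 *   uint64_t inset;
 *
 *   if (i40e_parse_input_set(&inset, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
 *                            fields, RTE_DIM(fields)) == 0)
 *           assert(inset == (I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
 *                            I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT));
 */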
6760
6761 /**
6762  * Translate the input set from logical bit masks to the register-aware
6763  * bit masks the hardware expects
6764  */
6765 static uint64_t
6766 i40e_translate_input_set_reg(uint64_t input)
6767 {
6768         uint64_t val = 0;
6769         uint16_t i;
6770
6771         static const struct {
6772                 uint64_t inset;
6773                 uint64_t inset_reg;
6774         } inset_map[] = {
6775                 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
6776                 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
6777                 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
6778                 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
6779                 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
6780                 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
6781                 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
6782                 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
6783                 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
6784                 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
6785                 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
6786                 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
6787                 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
6788                 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
6789                 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
6790                 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
6791                 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
6792                 {I40E_INSET_TUNNEL_DMAC,
6793                         I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
6794                 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
6795                 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
6796                 {I40E_INSET_TUNNEL_SRC_PORT,
6797                         I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
6798                 {I40E_INSET_TUNNEL_DST_PORT,
6799                         I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
6801                 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
6802                 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
6803                 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
6804                 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
6805                 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
6806                 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
6807                 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
6808                 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
6809         };
6810
6811         if (input == 0)
6812                 return val;
6813
6814         /* Translate input set to register aware inset */
6815         for (i = 0; i < RTE_DIM(inset_map); i++) {
6816                 if (input & inset_map[i].inset)
6817                         val |= inset_map[i].inset_reg;
6818         }
6819
6820         return val;
6821 }
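
/*
 * For example (sketch): an input set of I40E_INSET_IPV4_SRC |
 * I40E_INSET_IPV4_DST translates to I40E_REG_INSET_L3_SRC_IP4 |
 * I40E_REG_INSET_L3_DST_IP4, which the callers below split into the
 * low and high 32-bit halves of the INSET registers.
 */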
6822
6823 static uint8_t
6824 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
6825 {
6826         uint8_t i, idx = 0;
6827
6828         static const struct {
6829                 uint64_t inset;
6830                 uint32_t mask;
6831         } inset_mask_map[] = {
6832                 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
6833                 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
6834                 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
6835                 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
6836         };
6837
6838         if (!mask || !nb_elem)
6839                 return 0;
6840
6841         if (!inset && nb_elem >= I40E_INSET_MASK_NUM_REG) {
6842                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++)
6843                         mask[i] = 0;
6844                 return I40E_INSET_MASK_NUM_REG;
6845         }
6846
6847         for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
6848                 if (idx >= nb_elem)
6849                         break;
6850                 if (inset & inset_mask_map[i].inset) {
6851                         mask[idx] = inset_mask_map[i].mask;
6852                         idx++;
6853                 }
6854         }
6855
6856         return idx;
6857 }
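
/*
 * Illustration (sketch): an input set containing the IPv4 TOS and
 * protocol fields yields two mask words, one per maskable field:
 *
 *   uint32_t mask[I40E_INSET_MASK_NUM_REG];
 *   uint8_t num;
 *
 *   num = i40e_generate_inset_mask_reg(I40E_INSET_IPV4_TOS |
 *                                      I40E_INSET_IPV4_PROTO,
 *                                      mask, I40E_INSET_MASK_NUM_REG);
 *
 * After the call, num == 2, mask[0] == I40E_INSET_IPV4_TOS_MASK and
 * mask[1] == I40E_INSET_IPV4_PROTO_MASK.
 */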
6858
6859 static uint64_t
6860 i40e_get_reg_inset(struct i40e_hw *hw, enum rte_filter_type filter,
6861                             enum i40e_filter_pctype pctype)
6862 {
6863         uint64_t reg = 0;
6864
6865         if (filter == RTE_ETH_FILTER_HASH) {
6866                 reg = I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(1, pctype));
6867                 reg <<= I40E_32_BIT_WIDTH;
6868                 reg |= I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(0, pctype));
6869         } else if (filter == RTE_ETH_FILTER_FDIR) {
6870                 reg = I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 1));
6871                 reg <<= I40E_32_BIT_WIDTH;
6872                 reg |= I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 0));
6873         }
6874
6875         return reg;
6876 }
6877
6878 static void
6879 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
6880 {
6881         uint32_t reg = I40E_READ_REG(hw, addr);
6882
6883         PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
6884         if (reg != val)
6885                 I40E_WRITE_REG(hw, addr, val);
6886         PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
6887                     (uint32_t)I40E_READ_REG(hw, addr));
6888 }
6889
6890 static int
6891 i40e_set_hash_inset_mask(struct i40e_hw *hw,
6892                          enum i40e_filter_pctype pctype,
6893                          enum rte_filter_input_set_op op,
6894                          uint32_t *mask_reg,
6895                          uint8_t num)
6896 {
6897         uint32_t reg;
6898         uint8_t i;
6899
6900         if (!mask_reg || num > I40E_INSET_MASK_NUM_REG)
6901                 return -EINVAL;
6902
6903         if (op == RTE_ETH_INPUT_SET_SELECT) {
6904                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6905                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
6906                                              0);
6907                         if (i >= num)
6908                                 continue;
6909                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
6910                                              mask_reg[i]);
6911                 }
6912         } else if (op == RTE_ETH_INPUT_SET_ADD) {
6913                 uint8_t j, count = 0;
6914
6915                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6916                         reg = I40E_READ_REG(hw, I40E_GLQF_HASH_MSK(i, pctype));
6917                         if (reg & I40E_GLQF_HASH_MSK_FIELD)
6918                                 count++;
6919                 }
6920                 if (count + num > I40E_INSET_MASK_NUM_REG)
6921                         return -EINVAL;
6922
6923                 for (i = count, j = 0; j < num; i++, j++)
6924                         i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
6925                                              mask_reg[j]);
6926         }
6927
6928         return 0;
6929 }
6930
6931 static int
6932 i40e_set_fd_inset_mask(struct i40e_hw *hw,
6933                        enum i40e_filter_pctype pctype,
6934                        enum rte_filter_input_set_op op,
6935                        uint32_t *mask_reg,
6936                        uint8_t num)
6937 {
6938         uint32_t reg;
6939         uint8_t i;
6940
6941         if (!mask_reg || num > I40E_INSET_MASK_NUM_REG)
6942                 return -EINVAL;
6943
6944         if (op == RTE_ETH_INPUT_SET_SELECT) {
6945                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6946                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
6947                                              0);
6948                         if (i >= num)
6949                                 continue;
6950                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
6951                                              mask_reg[i]);
6952                 }
6953         } else if (op == RTE_ETH_INPUT_SET_ADD) {
6954                 uint8_t j, count = 0;
6955
6956                 for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) {
6957                         reg = I40E_READ_REG(hw, I40E_GLQF_FD_MSK(i, pctype));
6958                         if (reg & I40E_GLQF_FD_MSK_FIELD)
6959                                 count++;
6960                 }
6961                 if (count + num > I40E_INSET_MASK_NUM_REG)
6962                         return -EINVAL;
6963
6964                 for (i = count, j = 0; j < num; i++, j++)
6965                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
6966                                              mask_reg[j]);
6967         }
6968
6969         return 0;
6970 }
6971
6972 int
6973 i40e_filter_inset_select(struct i40e_hw *hw,
6974                          struct rte_eth_input_set_conf *conf,
6975                          enum rte_filter_type filter)
6976 {
6977         enum i40e_filter_pctype pctype;
6978         uint64_t inset_reg = 0, input_set;
6979         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG];
6980         uint8_t num;
6981         int ret;
6982
6983         if (!hw || !conf) {
6984                 PMD_DRV_LOG(ERR, "Invalid pointer");
6985                 return -EFAULT;
6986         }
6987
6988         pctype = i40e_flowtype_to_pctype(conf->flow_type);
6989         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
6990                 PMD_DRV_LOG(ERR, "Not supported flow type (%u)",
6991                             conf->flow_type);
6992                 return -EINVAL;
6993         }
6994         if (filter != RTE_ETH_FILTER_HASH && filter != RTE_ETH_FILTER_FDIR) {
6995                 PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
6996                 return -EINVAL;
6997         }
6998
6999         ret = i40e_parse_input_set(&input_set, pctype, conf->field,
7000                                    conf->inset_size);
7001         if (ret) {
7002                 PMD_DRV_LOG(ERR, "Failed to parse input set");
7003                 return -EINVAL;
7004         }
7005         if (i40e_validate_input_set(pctype, filter, input_set) != 0) {
7006                 PMD_DRV_LOG(ERR, "Invalid input set");
7007                 return -EINVAL;
7008         }
7009
7010         if (conf->op == RTE_ETH_INPUT_SET_ADD) {
7011                 inset_reg |= i40e_get_reg_inset(hw, filter, pctype);
7012         } else if (conf->op != RTE_ETH_INPUT_SET_SELECT) {
7013                 PMD_DRV_LOG(ERR, "Unsupported input set operation");
7014                 return -EINVAL;
7015         }
7016         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7017                                            I40E_INSET_MASK_NUM_REG);
7018         inset_reg |= i40e_translate_input_set_reg(input_set);
7019
7020         if (filter == RTE_ETH_FILTER_HASH) {
7021                 ret = i40e_set_hash_inset_mask(hw, pctype, conf->op, mask_reg,
7022                                                num);
7023                 if (ret)
7024                         return -EINVAL;
7025
7026                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
7027                                       (uint32_t)(inset_reg & UINT32_MAX));
7028                 i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
7029                                      (uint32_t)((inset_reg >>
7030                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
7031         } else if (filter == RTE_ETH_FILTER_FDIR) {
7032                 ret = i40e_set_fd_inset_mask(hw, pctype, conf->op, mask_reg,
7033                                              num);
7034                 if (ret)
7035                         return -EINVAL;
7036
7037                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
7038                                       (uint32_t)(inset_reg & UINT32_MAX));
7039                 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7040                                      (uint32_t)((inset_reg >>
7041                                      I40E_32_BIT_WIDTH) & UINT32_MAX));
7042         } else {
7043                 PMD_DRV_LOG(ERR, "Not supported filter type (%u)", filter);
7044                 return -EINVAL;
7045         }
7046         I40E_WRITE_FLUSH(hw);
7047
7048         return 0;
7049 }
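
/*
 * Usage sketch (application side, illustrative only): input set
 * selection reaches i40e_filter_inset_select() through the generic
 * filter API, e.g. to hash IPv4/UDP flows on the IP addresses only:
 *
 *   struct rte_eth_hash_filter_info info;
 *
 *   memset(&info, 0, sizeof(info));
 *   info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
 *   info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *   info.info.input_set_conf.inset_size = 2;
 *   info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
 *   info.info.input_set_conf.field[1] = RTE_ETH_INPUT_SET_L3_DST_IP4;
 *   info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *                           RTE_ETH_FILTER_SET, &info);
 */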
7050
7051 static int
7052 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7053 {
7054         int ret = 0;
7055
7056         if (!hw || !info) {
7057                 PMD_DRV_LOG(ERR, "Invalid pointer");
7058                 return -EFAULT;
7059         }
7060
7061         switch (info->info_type) {
7062         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7063                 i40e_get_symmetric_hash_enable_per_port(hw,
7064                                         &(info->info.enable));
7065                 break;
7066         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7067                 ret = i40e_get_hash_filter_global_config(hw,
7068                                 &(info->info.global_conf));
7069                 break;
7070         default:
7071                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7072                                                         info->info_type);
7073                 ret = -EINVAL;
7074                 break;
7075         }
7076
7077         return ret;
7078 }
7079
7080 static int
7081 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7082 {
7083         int ret = 0;
7084
7085         if (!hw || !info) {
7086                 PMD_DRV_LOG(ERR, "Invalid pointer");
7087                 return -EFAULT;
7088         }
7089
7090         switch (info->info_type) {
7091         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7092                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
7093                 break;
7094         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7095                 ret = i40e_set_hash_filter_global_config(hw,
7096                                 &(info->info.global_conf));
7097                 break;
7098         case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
7099                 ret = i40e_filter_inset_select(hw,
7100                                                &(info->info.input_set_conf),
7101                                                RTE_ETH_FILTER_HASH);
7102                 break;
7103
7104         default:
7105                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7106                                                         info->info_type);
7107                 ret = -EINVAL;
7108                 break;
7109         }
7110
7111         return ret;
7112 }
7113
7114 /* Operations for hash function */
7115 static int
7116 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
7117                       enum rte_filter_op filter_op,
7118                       void *arg)
7119 {
7120         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7121         int ret = 0;
7122
7123         switch (filter_op) {
7124         case RTE_ETH_FILTER_NOP:
7125                 break;
7126         case RTE_ETH_FILTER_GET:
7127                 ret = i40e_hash_filter_get(hw,
7128                         (struct rte_eth_hash_filter_info *)arg);
7129                 break;
7130         case RTE_ETH_FILTER_SET:
7131                 ret = i40e_hash_filter_set(hw,
7132                         (struct rte_eth_hash_filter_info *)arg);
7133                 break;
7134         default:
7135                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
7136                                                                 filter_op);
7137                 ret = -ENOTSUP;
7138                 break;
7139         }
7140
7141         return ret;
7142 }
7143
7144 /*
7145  * Configure an ethertype filter, which can direct packets by filtering
7146  * on MAC address and ether_type, or on ether_type alone
7147  */
7148 static int
7149 i40e_ethertype_filter_set(struct i40e_pf *pf,
7150                         struct rte_eth_ethertype_filter *filter,
7151                         bool add)
7152 {
7153         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7154         struct i40e_control_filter_stats stats;
7155         uint16_t flags = 0;
7156         int ret;
7157
7158         if (filter->queue >= pf->dev_data->nb_rx_queues) {
7159                 PMD_DRV_LOG(ERR, "Invalid queue ID");
7160                 return -EINVAL;
7161         }
7162         if (filter->ether_type == ETHER_TYPE_IPv4 ||
7163                 filter->ether_type == ETHER_TYPE_IPv6) {
7164                 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
7165                         " control packet filter.", filter->ether_type);
7166                 return -EINVAL;
7167         }
7168         if (filter->ether_type == ETHER_TYPE_VLAN)
7169                 PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
7170                         " not supported.");
7171
7172         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
7173                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
7174         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
7175                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
7176         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
7177
7178         memset(&stats, 0, sizeof(stats));
7179         ret = i40e_aq_add_rem_control_packet_filter(hw,
7180                         filter->mac_addr.addr_bytes,
7181                         filter->ether_type, flags,
7182                         pf->main_vsi->seid,
7183                         filter->queue, add, &stats, NULL);
7184
7185         PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
7186                          " mac_etype_used = %u, etype_used = %u,"
7187                          " mac_etype_free = %u, etype_free = %u",
7188                          ret, stats.mac_etype_used, stats.etype_used,
7189                          stats.mac_etype_free, stats.etype_free);
7190         if (ret < 0)
7191                 return -ENOSYS;
7192         return 0;
7193 }
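
/*
 * Usage sketch (application side, illustrative only): steering a custom
 * EtherType, e.g. 0x8999, to RX queue 4 lands in
 * i40e_ethertype_filter_set() via the generic filter API:
 *
 *   struct rte_eth_ethertype_filter filter;
 *
 *   memset(&filter, 0, sizeof(filter));
 *   filter.ether_type = 0x8999;
 *   filter.queue = 4;
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *                           RTE_ETH_FILTER_ADD, &filter);
 *
 * With RTE_ETHTYPE_FLAGS_MAC left unset the MAC address is ignored, and
 * with RTE_ETHTYPE_FLAGS_DROP left unset matching packets are forwarded
 * to the given queue.
 */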
7194
7195 /*
7196  * Handle operations for ethertype filter.
7197  */
7198 static int
7199 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
7200                                 enum rte_filter_op filter_op,
7201                                 void *arg)
7202 {
7203         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7204         int ret = 0;
7205
7206         if (filter_op == RTE_ETH_FILTER_NOP)
7207                 return ret;
7208
7209         if (arg == NULL) {
7210                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
7211                             filter_op);
7212                 return -EINVAL;
7213         }
7214
7215         switch (filter_op) {
7216         case RTE_ETH_FILTER_ADD:
7217                 ret = i40e_ethertype_filter_set(pf,
7218                         (struct rte_eth_ethertype_filter *)arg,
7219                         TRUE);
7220                 break;
7221         case RTE_ETH_FILTER_DELETE:
7222                 ret = i40e_ethertype_filter_set(pf,
7223                         (struct rte_eth_ethertype_filter *)arg,
7224                         FALSE);
7225                 break;
7226         default:
7227                 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
7228                 ret = -ENOSYS;
7229                 break;
7230         }
7231         return ret;
7232 }
7233
7234 static int
7235 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
7236                      enum rte_filter_type filter_type,
7237                      enum rte_filter_op filter_op,
7238                      void *arg)
7239 {
7240         int ret = 0;
7241
7242         if (dev == NULL)
7243                 return -EINVAL;
7244
7245         switch (filter_type) {
7246         case RTE_ETH_FILTER_NONE:
7247                 /* For global configuration */
7248                 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
7249                 break;
7250         case RTE_ETH_FILTER_HASH:
7251                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
7252                 break;
7253         case RTE_ETH_FILTER_MACVLAN:
7254                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
7255                 break;
7256         case RTE_ETH_FILTER_ETHERTYPE:
7257                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
7258                 break;
7259         case RTE_ETH_FILTER_TUNNEL:
7260                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
7261                 break;
7262         case RTE_ETH_FILTER_FDIR:
7263                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
7264                 break;
7265         default:
7266                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
7267                                                         filter_type);
7268                 ret = -EINVAL;
7269                 break;
7270         }
7271
7272         return ret;
7273 }
7274
7275 /*
7276  * Check and enable Extended Tag.
7277  * Enabling Extended Tag is important for 40G performance.
7278  */
7279 static void
7280 i40e_enable_extended_tag(struct rte_eth_dev *dev)
7281 {
7282         uint32_t buf = 0;
7283         int ret;
7284
7285         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
7286                                       PCI_DEV_CAP_REG);
7287         if (ret < 0) {
7288                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
7289                             PCI_DEV_CAP_REG);
7290                 return;
7291         }
7292         if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
7293                 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
7294                 return;
7295         }
7296
7297         buf = 0;
7298         ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
7299                                       PCI_DEV_CTRL_REG);
7300         if (ret < 0) {
7301                 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
7302                             PCI_DEV_CTRL_REG);
7303                 return;
7304         }
7305         if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
7306                 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
7307                 return;
7308         }
7309         buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
7310         ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
7311                                        PCI_DEV_CTRL_REG);
7312         if (ret < 0) {
7313                 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
7314                             PCI_DEV_CTRL_REG);
7315                 return;
7316         }
7317 }
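
/*
 * Note: PCI_DEV_CAP_REG/PCI_DEV_CTRL_REG above correspond to the PCIe
 * Device Capabilities register ("Extended Tag Field Supported" bit) and
 * Device Control register ("Extended Tag Field Enable" bit). Enabling
 * extended tags allows more outstanding PCIe requests, which matters
 * for 40G throughput.
 */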
7318
7319 /*
7320  * As some registers are reset only by a global hardware reset,
7321  * hardware initialization is needed here to put those registers into an
7322  * expected initial state.
7323  */
7324 static void
7325 i40e_hw_init(struct rte_eth_dev *dev)
7326 {
7327         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7328
7329         i40e_enable_extended_tag(dev);
7330
7331         /* clear the PF Queue Filter control register */
7332         I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
7333
7334         /* Disable symmetric hash per port */
7335         i40e_set_symmetric_hash_enable_per_port(hw, 0);
7336 }
7337
7338 enum i40e_filter_pctype
7339 i40e_flowtype_to_pctype(uint16_t flow_type)
7340 {
7341         static const enum i40e_filter_pctype pctype_table[] = {
7342                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
7343                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
7344                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7345                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
7346                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7347                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
7348                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7349                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
7350                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7351                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
7352                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
7353                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
7354                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
7355                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
7356                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
7357                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
7358                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
7359                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
7360                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
7361         };
7362
7363         if (flow_type >= RTE_DIM(pctype_table))
                     return (enum i40e_filter_pctype)0; /* invalid pctype */
             return pctype_table[flow_type];
7364 }
7365
7366 uint16_t
7367 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
7368 {
7369         static const uint16_t flowtype_table[] = {
7370                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
7371                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7372                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
7373                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7374                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
7375                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7376                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
7377                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7378                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
7379                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
7380                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7381                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
7382                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7383                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
7384                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7385                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
7386                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7387                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
7388                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
7389         };
7390
7391         if (pctype >= RTE_DIM(flowtype_table))
                     return RTE_ETH_FLOW_UNKNOWN;
             return flowtype_table[pctype];
7392 }
7393
7394 /*
7395  * On X710, performance falls far short of expectations on recent firmware
7396  * versions. On XL710, the same is true if promiscuous mode is disabled,
7397  * or if promiscuous mode is enabled and the port MAC address equals the
7398  * packet destination MAC address. The fix for this issue may not be
7399  * integrated in upcoming firmware versions, so a workaround in the
7400  * software driver is needed: modify the initial values of 3 internal-only
7401  * registers on both X710 and XL710. Note that the values for X710 and
7402  * XL710 could differ, and the workaround can be removed once the issue
7403  * is fixed in firmware.
7404  */
7405
7406 /* For both X710 and XL710 */
7407 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
7408 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
7409
7410 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
7411 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
7412
7413 /* For X710 */
7414 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
7415 /* For XL710 */
7416 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
7417 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
7418
7419 static void
7420 i40e_configure_registers(struct i40e_hw *hw)
7421 {
7422         static struct {
7423                 uint32_t addr;
7424                 uint64_t val;
7425         } reg_table[] = {
7426                 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
7427                 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
7428                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
7429         };
7430         uint64_t reg;
7431         uint32_t i;
7432         int ret;
7433
7434         for (i = 0; i < RTE_DIM(reg_table); i++) {
7435                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
7436                         if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
7437                                 reg_table[i].val =
7438                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
7439                         else /* For X710 */
7440                                 reg_table[i].val =
7441                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
7442                 }
7443
7444                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
7445                                                         &reg, NULL);
7446                 if (ret < 0) {
7447                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
7448                                                         reg_table[i].addr);
7449                         break;
7450                 }
7451                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
7452                                                 reg_table[i].addr, reg);
7453                 if (reg == reg_table[i].val)
7454                         continue;
7455
7456                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
7457                                                 reg_table[i].val, NULL);
7458                 if (ret < 0) {
7459                         PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
7460                                 "address of 0x%"PRIx32, reg_table[i].val,
7461                                                         reg_table[i].addr);
7462                         break;
7463                 }
7464                 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
7465                         "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
7466         }
7467 }
7468
7469 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
7470 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
7471 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
7472 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
7473 static int
7474 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
7475 {
7476         uint32_t reg;
7477         int ret;
7478
7479         if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
7480                 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
7481                 return -EINVAL;
7482         }
7483
7484         /* Configure for double VLAN RX stripping */
7485         reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
7486         if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
7487                 reg |= I40E_VSI_TSR_QINQ_CONFIG;
7488                 ret = i40e_aq_debug_write_register(hw,
7489                                                    I40E_VSI_TSR(vsi->vsi_id),
7490                                                    reg, NULL);
7491                 if (ret < 0) {
7492                         PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
7493                                     vsi->vsi_id);
7494                         return I40E_ERR_CONFIG;
7495                 }
7496         }
7497
7498         /* Configure for double VLAN TX insertion */
7499         reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
7500         if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
7501                 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
7502                 ret = i40e_aq_debug_write_register(hw,
7503                                                    I40E_VSI_L2TAGSTXVALID(
7504                                                    vsi->vsi_id), reg, NULL);
7505                 if (ret < 0) {
7506                         PMD_DRV_LOG(ERR, "Failed to update "
7507                                 "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
7508                         return I40E_ERR_CONFIG;
7509                 }
7510         }
7511
7512         return 0;
7513 }
7514
7515 /**
7516  * i40e_aq_add_mirror_rule
7517  * @hw: pointer to the hardware structure
7518  * @seid: VEB seid to add mirror rule to
7519  * @dst_id: destination VSI seid
      * @rule_type: type of the mirror rule to be added
7520  * @entries: Buffer which contains the entities to be mirrored
7521  * @count: number of entities contained in the buffer
7522  * @rule_id: the rule_id of the rule to be added
7523  *
7524  * Add a mirror rule for a given VEB.
7525  *
7526  **/
7527 static enum i40e_status_code
7528 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
7529                         uint16_t seid, uint16_t dst_id,
7530                         uint16_t rule_type, uint16_t *entries,
7531                         uint16_t count, uint16_t *rule_id)
7532 {
7533         struct i40e_aq_desc desc;
7534         struct i40e_aqc_add_delete_mirror_rule cmd;
7535         struct i40e_aqc_add_delete_mirror_rule_completion *resp =
7536                 (struct i40e_aqc_add_delete_mirror_rule_completion *)
7537                 &desc.params.raw;
7538         uint16_t buff_len;
7539         enum i40e_status_code status;
7540
7541         i40e_fill_default_direct_cmd_desc(&desc,
7542                                           i40e_aqc_opc_add_mirror_rule);
7543         memset(&cmd, 0, sizeof(cmd));
7544
7545         buff_len = sizeof(uint16_t) * count;
7546         desc.datalen = rte_cpu_to_le_16(buff_len);
7547         if (buff_len > 0)
7548                 desc.flags |= rte_cpu_to_le_16(
7549                         (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
7550         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
7551                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
7552         cmd.num_entries = rte_cpu_to_le_16(count);
7553         cmd.seid = rte_cpu_to_le_16(seid);
7554         cmd.destination = rte_cpu_to_le_16(dst_id);
7555
7556         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
7557         status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
7558         PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
7559                          " rule_id = %u,"
7560                          " mirror_rules_used = %u, mirror_rules_free = %u",
7561                          hw->aq.asq_last_status, resp->rule_id,
7562                          resp->mirror_rules_used, resp->mirror_rules_free);
7563         *rule_id = rte_le_to_cpu_16(resp->rule_id);
7564
7565         return status;
7566 }
7567
7568 /**
7569  * i40e_aq_del_mirror_rule
7570  * @hw: pointer to the hardware structure
7571  * @seid: VEB seid to delete the mirror rule from
      * @rule_type: type of the mirror rule to be deleted
7572  * @entries: Buffer which contains the entities to be mirrored
7573  * @count: number of entities contained in the buffer
7574  * @rule_id: the rule_id of the rule to be deleted
7575  *
7576  * Delete a mirror rule for a given VEB.
7577  *
7578  **/
7579 static enum i40e_status_code
7580 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
7581                 uint16_t seid, uint16_t rule_type, uint16_t *entries,
7582                 uint16_t count, uint16_t rule_id)
7583 {
7584         struct i40e_aq_desc desc;
7585         struct i40e_aqc_add_delete_mirror_rule cmd;
7586         uint16_t buff_len = 0;
7587         enum i40e_status_code status;
7588         void *buff = NULL;
7589
7590         i40e_fill_default_direct_cmd_desc(&desc,
7591                                           i40e_aqc_opc_delete_mirror_rule);
7592         memset(&cmd, 0, sizeof(cmd));
7593         if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
7594                 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
7595                                                           I40E_AQ_FLAG_RD));
7596                 cmd.num_entries = rte_cpu_to_le_16(count);
7597                 buff_len = sizeof(uint16_t) * count;
7598                 desc.datalen = rte_cpu_to_le_16(buff_len);
7599                 buff = (void *)entries;
7600         } else
7601                 /* rule id is filled in destination field for deleting mirror rule */
7602                 cmd.destination = rte_cpu_to_le_16(rule_id);
7603
7604         cmd.rule_type = rte_cpu_to_le_16(rule_type <<
7605                                 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
7606         cmd.seid = rte_cpu_to_le_16(seid);
7607
7608         rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
7609         status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
7610
7611         return status;
7612 }
7613
7614 /**
7615  * i40e_mirror_rule_set
7616  * @dev: pointer to the hardware structure
7617  * @mirror_conf: mirror rule info
7618  * @sw_id: mirror rule's sw_id
7619  * @on: enable/disable
7620  *
7621  * set a mirror rule.
7622  *
7623  **/
7624 static int
7625 i40e_mirror_rule_set(struct rte_eth_dev *dev,
7626                         struct rte_eth_mirror_conf *mirror_conf,
7627                         uint8_t sw_id, uint8_t on)
7628 {
7629         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7630         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7631         struct i40e_mirror_rule *it, *mirr_rule = NULL;
7632         struct i40e_mirror_rule *parent = NULL;
7633         uint16_t seid, dst_seid, rule_id;
7634         uint16_t i, j = 0;
7635         int ret;
7636
7637         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
7638
7639         if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
7640                 PMD_DRV_LOG(ERR, "mirror rule cannot be configured"
7641                         " without a VEB or VFs.");
7642                 return -ENOSYS;
7643         }
7644         if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
7645                 PMD_DRV_LOG(ERR, "mirror table is full.");
7646                 return -ENOSPC;
7647         }
7648         if (mirror_conf->dst_pool > pf->vf_num) {
7649                 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
7650                                  mirror_conf->dst_pool);
7651                 return -EINVAL;
7652         }
7653
7654         seid = pf->main_vsi->veb->seid;
7655
7656         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
7657                 if (sw_id <= it->index) {
7658                         mirr_rule = it;
7659                         break;
7660                 }
7661                 parent = it;
7662         }
7663         if (mirr_rule && sw_id == mirr_rule->index) {
7664                 if (on) {
7665                         PMD_DRV_LOG(ERR, "mirror rule exists.");
7666                         return -EEXIST;
7667                 } else {
7668                         ret = i40e_aq_del_mirror_rule(hw, seid,
7669                                         mirr_rule->rule_type,
7670                                         mirr_rule->entries,
7671                                         mirr_rule->num_entries, mirr_rule->id);
7672                         if (ret < 0) {
7673                                 PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
7674                                                    " ret = %d, aq_err = %d.",
7675                                                    ret, hw->aq.asq_last_status);
7676                                 return -ENOSYS;
7677                         }
7678                         TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
7679                         rte_free(mirr_rule);
7680                         pf->nb_mirror_rule--;
7681                         return 0;
7682                 }
7683         } else if (!on) {
7684                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
7685                 return -ENOENT;
7686         }
7687
7688         mirr_rule = rte_zmalloc("i40e_mirror_rule",
7689                                 sizeof(struct i40e_mirror_rule), 0);
7690         if (!mirr_rule) {
7691                 PMD_DRV_LOG(ERR, "failed to allocate memory");
7692                 return I40E_ERR_NO_MEMORY;
7693         }
7694         switch (mirror_conf->rule_type) {
7695         case ETH_MIRROR_VLAN:
7696                 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
7697                         if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
7698                                 mirr_rule->entries[j] =
7699                                         mirror_conf->vlan.vlan_id[i];
7700                                 j++;
7701                         }
7702                 }
7703                 if (j == 0) {
7704                         PMD_DRV_LOG(ERR, "vlan is not specified.");
7705                         rte_free(mirr_rule);
7706                         return -EINVAL;
7707                 }
7708                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
7709                 break;
7710         case ETH_MIRROR_VIRTUAL_POOL_UP:
7711         case ETH_MIRROR_VIRTUAL_POOL_DOWN:
7712                 /* check if the specified pool bit is out of range */
7713                 if (mirror_conf->pool_mask >= (uint64_t)(1ULL << (pf->vf_num + 1))) {
7714                         PMD_DRV_LOG(ERR, "pool mask is out of range.");
7715                         rte_free(mirr_rule);
7716                         return -EINVAL;
7717                 }
7718                 for (i = 0, j = 0; i < pf->vf_num; i++) {
7719                         if (mirror_conf->pool_mask & (1ULL << i)) {
7720                                 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
7721                                 j++;
7722                         }
7723                 }
7724                 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
7725                         /* add pf vsi to entries */
7726                         mirr_rule->entries[j] = pf->main_vsi_seid;
7727                         j++;
7728                 }
7729                 if (j == 0) {
7730                         PMD_DRV_LOG(ERR, "pool is not specified.");
7731                         rte_free(mirr_rule);
7732                         return -EINVAL;
7733                 }
7734                 /* egress and ingress in AQ commands mean from the switch, not the port */
7735                 mirr_rule->rule_type =
7736                         (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
7737                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
7738                         I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
7739                 break;
7740         case ETH_MIRROR_UPLINK_PORT:
7741                 /* egress and ingress in AQ commands mean from the switch, not the port */
7742                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
7743                 break;
7744         case ETH_MIRROR_DOWNLINK_PORT:
7745                 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
7746                 break;
7747         default:
7748                 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
7749                         mirror_conf->rule_type);
7750                 rte_free(mirr_rule);
7751                 return -EINVAL;
7752         }
7753
7754         /* If the dst_pool is equal to vf_num, consider it as PF */
7755         if (mirror_conf->dst_pool == pf->vf_num)
7756                 dst_seid = pf->main_vsi_seid;
7757         else
7758                 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
7759
7760         ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
7761                                       mirr_rule->rule_type, mirr_rule->entries,
7762                                       j, &rule_id);
7763         if (ret < 0) {
7764                 PMD_DRV_LOG(ERR, "failed to add mirror rule:"
7765                                    " ret = %d, aq_err = %d.",
7766                                    ret, hw->aq.asq_last_status);
7767                 rte_free(mirr_rule);
7768                 return -ENOSYS;
7769         }
7770
7771         mirr_rule->index = sw_id;
7772         mirr_rule->num_entries = j;
7773         mirr_rule->id = rule_id;
7774         mirr_rule->dst_vsi_seid = dst_seid;
7775
7776         if (parent)
7777                 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
7778         else
7779                 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
7780
7781         pf->nb_mirror_rule++;
7782         return 0;
7783 }
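
/*
 * Usage sketch (application side, illustrative only): mirroring VLAN 10
 * traffic to VF pool 1 is dispatched to i40e_mirror_rule_set() through
 * the generic mirroring API:
 *
 *   struct rte_eth_mirror_conf conf;
 *
 *   memset(&conf, 0, sizeof(conf));
 *   conf.rule_type = ETH_MIRROR_VLAN;
 *   conf.dst_pool = 1;
 *   conf.vlan.vlan_mask = 1ULL << 0;
 *   conf.vlan.vlan_id[0] = 10;
 *
 *   rte_eth_mirror_rule_set(port_id, &conf, rule_id, 1);
 *
 * Calling rte_eth_mirror_rule_reset(port_id, rule_id), or setting the
 * same rule with on == 0, removes it again.
 */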
7784
7785 /**
7786  * i40e_mirror_rule_reset
7787  * @dev: pointer to the device
7788  * @sw_id: mirror rule's sw_id
7789  *
7790  * reset a mirror rule.
7791  *
7792  **/
7793 static int
7794 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
7795 {
7796         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7797         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7798         struct i40e_mirror_rule *it, *mirr_rule = NULL;
7799         uint16_t seid;
7800         int ret;
7801
7802         PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
7803
7804         seid = pf->main_vsi->veb->seid;
7805
7806         TAILQ_FOREACH(it, &pf->mirror_list, rules) {
7807                 if (sw_id == it->index) {
7808                         mirr_rule = it;
7809                         break;
7810                 }
7811         }
7812         if (mirr_rule) {
7813                 ret = i40e_aq_del_mirror_rule(hw, seid,
7814                                 mirr_rule->rule_type,
7815                                 mirr_rule->entries,
7816                                 mirr_rule->num_entries, mirr_rule->id);
7817                 if (ret < 0) {
7818                         PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
7819                                            " status = %d, aq_err = %d.",
7820                                            ret, hw->aq.asq_last_status);
7821                         return -ENOSYS;
7822                 }
7823                 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
7824                 rte_free(mirr_rule);
7825                 pf->nb_mirror_rule--;
7826         } else {
7827                 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
7828                 return -ENOENT;
7829         }
7830         return 0;
7831 }
7832
7833 static uint64_t
7834 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
7835 {
7836         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7837         uint64_t systim_cycles;
7838
7839         systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
7840         systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
7841                         << 32;
7842
7843         return systim_cycles;
7844 }
7845
7846 static uint64_t
7847 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
7848 {
7849         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7850         uint64_t rx_tstamp;
7851
7852         rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
7853         rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
7854                         << 32;
7855
7856         return rx_tstamp;
7857 }
7858
7859 static uint64_t
7860 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
7861 {
7862         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7863         uint64_t tx_tstamp;
7864
7865         tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
7866         tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
7867                         << 32;
7868
7869         return tx_tstamp;
7870 }
7871
7872 static void
7873 i40e_start_timecounters(struct rte_eth_dev *dev)
7874 {
7875         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7876         struct i40e_adapter *adapter =
7877                         (struct i40e_adapter *)dev->data->dev_private;
7878         struct rte_eth_link link;
7879         uint32_t tsync_inc_l;
7880         uint32_t tsync_inc_h;
7881
7882         /* Get current link speed. */
7883         memset(&link, 0, sizeof(link));
7884         i40e_dev_link_update(dev, 1);
7885         rte_i40e_dev_atomic_read_link_status(dev, &link);
7886
7887         switch (link.link_speed) {
7888         case ETH_LINK_SPEED_40G:
7889                 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
7890                 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
7891                 break;
7892         case ETH_LINK_SPEED_10G:
7893                 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
7894                 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
7895                 break;
7896         case ETH_LINK_SPEED_1000:
7897                 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
7898                 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
7899                 break;
7900         default:
7901                 tsync_inc_l = 0x0;
7902                 tsync_inc_h = 0x0;
7903         }
7904
7905         /* Set the timesync increment value. */
7906         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
7907         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
7908
7909         memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
7910         memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7911         memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7912
7913         adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
7914         adapter->systime_tc.cc_shift = 0;
7915         adapter->systime_tc.nsec_mask = 0;
7916
7917         adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
7918         adapter->rx_tstamp_tc.cc_shift = 0;
7919         adapter->rx_tstamp_tc.nsec_mask = 0;
7920
7921         adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
7922         adapter->tx_tstamp_tc.cc_shift = 0;
7923         adapter->tx_tstamp_tc.nsec_mask = 0;
7924 }
7925
7926 static int
7927 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
7928 {
7929         struct i40e_adapter *adapter =
7930                         (struct i40e_adapter *)dev->data->dev_private;
7931
7932         adapter->systime_tc.nsec += delta;
7933         adapter->rx_tstamp_tc.nsec += delta;
7934         adapter->tx_tstamp_tc.nsec += delta;
7935
7936         return 0;
7937 }
7938
7939 static int
7940 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
7941 {
7942         uint64_t ns;
7943         struct i40e_adapter *adapter =
7944                         (struct i40e_adapter *)dev->data->dev_private;
7945
7946         ns = rte_timespec_to_ns(ts);
7947
7948         /* Set the timecounters to a new value. */
7949         adapter->systime_tc.nsec = ns;
7950         adapter->rx_tstamp_tc.nsec = ns;
7951         adapter->tx_tstamp_tc.nsec = ns;
7952
7953         return 0;
7954 }
7955
7956 static int
7957 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
7958 {
7959         uint64_t ns, systime_cycles;
7960         struct i40e_adapter *adapter =
7961                         (struct i40e_adapter *)dev->data->dev_private;
7962
7963         systime_cycles = i40e_read_systime_cyclecounter(dev);
7964         ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
7965         *ts = rte_ns_to_timespec(ns);
7966
7967         return 0;
7968 }
7969
7970 static int
7971 i40e_timesync_enable(struct rte_eth_dev *dev)
7972 {
7973         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7974         uint32_t tsync_ctl_l;
7975         uint32_t tsync_ctl_h;
7976
7977         /* Stop the timesync system time. */
7978         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
7979         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
7980         /* Reset the timesync system time value. */
7981         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
7982         I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
7983
7984         i40e_start_timecounters(dev);
7985
7986         /* Clear timesync registers. */
7987         I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
7988         I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
7989         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
7990         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
7991         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
7992         I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
7993
7994         /* Enable timestamping of PTP packets. */
7995         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
7996         tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
7997
7998         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
7999         tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
8000         tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
8001
8002         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8003         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8004
8005         return 0;
8006 }
8007
8008 static int
8009 i40e_timesync_disable(struct rte_eth_dev *dev)
8010 {
8011         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8012         uint32_t tsync_ctl_l;
8013         uint32_t tsync_ctl_h;
8014
8015         /* Disable timestamping of transmitted PTP packets. */
8016         tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
8017         tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
8018
8019         tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
8020         tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
8021
8022         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8023         I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8024
8025         /* Reset the timesync increment value. */
8026         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
8027         I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
8028
8029         return 0;
8030 }
8031
8032 static int
8033 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
8034                                 struct timespec *timestamp, uint32_t flags)
8035 {
8036         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8037         struct i40e_adapter *adapter =
8038                 (struct i40e_adapter *)dev->data->dev_private;
8039
8040         uint32_t sync_status;
8041         uint32_t index = flags & 0x03;
8042         uint64_t rx_tstamp_cycles;
8043         uint64_t ns;
8044
8045         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
8046         if ((sync_status & (1 << index)) == 0)
8047                 return -EINVAL;
8048
8049         rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
8050         ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
8051         *timestamp = rte_ns_to_timespec(ns);
8052
8053         return 0;
8054 }
8055
8056 static int
8057 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
8058                                 struct timespec *timestamp)
8059 {
8060         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8061         struct i40e_adapter *adapter =
8062                 (struct i40e_adapter *)dev->data->dev_private;
8063
8064         uint32_t sync_status;
8065         uint64_t tx_tstamp_cycles;
8066         uint64_t ns;
8067
8068         sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
8069         if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
8070                 return -EINVAL;
8071
8072         tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
8073         ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
8074         *timestamp = rte_ns_to_timespec(ns);
8075
8076         return 0;
8077 }
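
/*
 * Hedged usage sketch (application side, not part of the driver): how
 * the timesync callbacks above are reached through the generic ethdev
 * API. The Rx timestamp register index 0 and the bare error handling
 * are assumptions.
 */
static inline void
i40e_timesync_usage_sketch(uint8_t port_id)
{
        struct timespec ts;

        rte_eth_timesync_enable(port_id);
        /* after receiving a timestamped PTP packet on this port: */
        if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
                printf("Rx tstamp: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        /* after sending a packet with the PKT_TX_IEEE1588_TMST flag: */
        if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
                printf("Tx tstamp: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        rte_eth_timesync_disable(port_id);
}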
8078
8079 /*
8080  * i40e_parse_dcb_configure - parse the DCB configuration from the user
8081  * @dev: the device being configured
8082  * @dcb_cfg: pointer to the parsed configuration
8083  * @tc_map: pointer to the bit map of enabled traffic classes
8084  *
8085  * Returns 0 on success, negative value on failure
8086  */
8087 static int
8088 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
8089                          struct i40e_dcbx_config *dcb_cfg,
8090                          uint8_t *tc_map)
8091 {
8092         struct rte_eth_dcb_rx_conf *dcb_rx_conf;
8093         uint8_t i, tc_bw, bw_lf;
8094
8095         memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
8096
8097         dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
8098         if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
8099                 PMD_INIT_LOG(ERR, "number of TCs exceeds the maximum.");
8100                 return -EINVAL;
8101         }
8102
8103         /* assume each TC has the same bw; guard against nb_tcs == 0 */
8104         tc_bw = I40E_MAX_PERCENT / RTE_MAX(dcb_rx_conf->nb_tcs, 1);
8105         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
8106                 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
8107         /* distribute the remainder so the sum of tcbw equals 100 */
8108         bw_lf = I40E_MAX_PERCENT % RTE_MAX(dcb_rx_conf->nb_tcs, 1);
8109         for (i = 0; i < bw_lf; i++)
8110                 dcb_cfg->etscfg.tcbwtable[i]++;
8111
8112         /* assume each tc has the same Transmission Selection Algorithm */
8113         for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
8114                 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
8115
8116         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
8117                 dcb_cfg->etscfg.prioritytable[i] =
8118                                 dcb_rx_conf->dcb_tc[i];
8119
8120         /* FW needs one App to configure HW */
8121         dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
8122         dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
8123         dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
8124         dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
8125
8126         if (dcb_rx_conf->nb_tcs == 0)
8127                 *tc_map = 1; /* tc0 only */
8128         else
8129                 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
8130
8131         if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
8132                 dcb_cfg->pfc.willing = 0;
8133                 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
8134                 dcb_cfg->pfc.pfcenable = *tc_map;
8135         }
8136         return 0;
8137 }
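
/*
 * Worked example of the even bandwidth split above (illustrative only,
 * not called by the driver): with nb_tcs = 8, tc_bw = 100 / 8 = 12 and
 * bw_lf = 100 % 8 = 4, so TCs 0-3 get 13% and TCs 4-7 get 12%, summing
 * to exactly 100. Assumes nb_tcs > 0.
 */
static inline void
i40e_even_tc_bw_sketch(uint8_t *tcbwtable, uint8_t nb_tcs)
{
        uint8_t i;

        for (i = 0; i < nb_tcs; i++)
                tcbwtable[i] = I40E_MAX_PERCENT / nb_tcs +
                               (i < I40E_MAX_PERCENT % nb_tcs ? 1 : 0);
}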
8138
8140 static enum i40e_status_code
8141 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
8142                               struct i40e_aqc_vsi_properties_data *info,
8143                               uint8_t enabled_tcmap)
8144 {
8145         enum i40e_status_code ret;
8146         int i, total_tc = 0;
8147         uint16_t qpnum_per_tc, bsf, qp_idx;
8148         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
8149
8150         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
8151         if (ret != I40E_SUCCESS)
8152                 return ret;
8153
8154         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8155                 if (enabled_tcmap & (1 << i))
8156                         total_tc++;
8157         }
8158         if (total_tc == 0)
8159                 total_tc = 1;
8160         vsi->enabled_tc = enabled_tcmap;
8161
8162         /* Number of queues per enabled TC */
8163         qpnum_per_tc = dev_data->nb_rx_queues / total_tc;
8164         if (qpnum_per_tc == 0) {
8165                 PMD_INIT_LOG(ERR, "number of queues is less than TCs.");
8166                 return I40E_ERR_INVALID_QP_ID;
8167         }
8168         qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
8169                                 I40E_MAX_Q_PER_TC);
8170         bsf = rte_bsf32(qpnum_per_tc);
8171
8172         /**
8173          * Configure TC and queue mapping parameters: for each enabled TC,
8174          * allocate qpnum_per_tc queues to that traffic class; disabled TCs
8175          * are served by the default queue.
8176          */
8177         qp_idx = 0;
8178         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8179                 if (vsi->enabled_tc & (1 << i)) {
8180                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
8181                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
8182                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
8183                         qp_idx += qpnum_per_tc;
8184                 } else
8185                         info->tc_mapping[i] = 0;
8186         }
8187
8188         /* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
8189         if (vsi->type == I40E_VSI_SRIOV) {
8190                 info->mapping_flags |=
8191                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
8192                 for (i = 0; i < vsi->nb_qps; i++)
8193                         info->queue_mapping[i] =
8194                                 rte_cpu_to_le_16(vsi->base_queue + i);
8195         } else {
8196                 info->mapping_flags |=
8197                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
8198                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
8199         }
8200         info->valid_sections |=
8201                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
8202
8203         return I40E_SUCCESS;
8204 }
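
/*
 * Illustrative sketch (not called by the driver): decoding one
 * tc_mapping word produced above. The 16-bit field packs the first
 * queue offset together with log2 of the queue count, which is also
 * how i40e_dev_get_dcb_info() below recovers them.
 */
static inline void
i40e_decode_tc_mapping_sketch(uint16_t tc_mapping_le, uint16_t *base,
                              uint16_t *nb_queue)
{
        uint16_t map = rte_le_to_cpu_16(tc_mapping_le);
        uint16_t bsf = (map & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
                       I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;

        *base = (map & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
                I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
        *nb_queue = 1 << bsf;
}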
8205
8206 /*
8207  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
8208  * @vsi: VSI to be configured
8209  * @tc_map: enabled TC bitmap
8210  *
8211  * Returns 0 on success, negative value on failure
8212  */
8213 static enum i40e_status_code
8214 i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map)
8215 {
8216         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
8217         struct i40e_vsi_context ctxt;
8218         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
8219         enum i40e_status_code ret = I40E_SUCCESS;
8220         int i;
8221
8222         /* Check if enabled_tc is same as existing or new TCs */
8223         if (vsi->enabled_tc == tc_map)
8224                 return ret;
8225
8226         /* configure tc bandwidth */
8227         memset(&bw_data, 0, sizeof(bw_data));
8228         bw_data.tc_valid_bits = tc_map;
8229         /* Enable ETS TCs with equal BW Share for now across all VSIs */
8230         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8231                 if (tc_map & BIT_ULL(i))
8232                         bw_data.tc_bw_credits[i] = 1;
8233         }
8234         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
8235         if (ret) {
8236                 PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
8237                         " per TC failed, aq_err = %d",
8238                         hw->aq.asq_last_status);
8239                 goto out;
8240         }
8241         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
8242                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
8243
8244         /* Update Queue Pairs Mapping for currently enabled UPs */
8245         ctxt.seid = vsi->seid;
8246         ctxt.pf_num = hw->pf_id;
8247         ctxt.vf_num = 0;
8248         ctxt.uplink_seid = vsi->uplink_seid;
8249         ctxt.info = vsi->info;
8250         i40e_get_cap(hw);
8251         ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
8252         if (ret)
8253                 goto out;
8254
8255         /* Update the VSI after updating the VSI queue-mapping information */
8256         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
8257         if (ret) {
8258                 PMD_INIT_LOG(ERR, "Failed to configure "
8259                             "TC queue mapping, aq_err = %d",
8260                             hw->aq.asq_last_status);
8261                 goto out;
8262         }
8263         /* update the local VSI info with updated queue map */
8264         (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
8265                                         sizeof(vsi->info.tc_mapping));
8266         (void)rte_memcpy(&vsi->info.queue_mapping,
8267                         &ctxt.info.queue_mapping,
8268                 sizeof(vsi->info.queue_mapping));
8269         vsi->info.mapping_flags = ctxt.info.mapping_flags;
8270         vsi->info.valid_sections = 0;
8271
8272         /* query and update current VSI BW information */
8273         ret = i40e_vsi_get_bw_config(vsi);
8274         if (ret) {
8275                 PMD_INIT_LOG(ERR,
8276                          "Failed updating vsi bw info, err %s aq_err %s",
8277                          i40e_stat_str(hw, ret),
8278                          i40e_aq_str(hw, hw->aq.asq_last_status));
8279                 goto out;
8280         }
8281
8282         vsi->enabled_tc = tc_map;
8283
8284 out:
8285         return ret;
8286 }
8287
8288 /*
8289  * i40e_dcb_hw_configure - program the dcb setting to hw
8290  * @pf: pf the configuration is taken on
8291  * @new_cfg: new configuration
8292  * @tc_map: enabled TC bitmap
8293  *
8294  * Returns 0 on success, negative value on failure
8295  */
8296 static enum i40e_status_code
8297 i40e_dcb_hw_configure(struct i40e_pf *pf,
8298                       struct i40e_dcbx_config *new_cfg,
8299                       uint8_t tc_map)
8300 {
8301         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8302         struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
8303         struct i40e_vsi *main_vsi = pf->main_vsi;
8304         struct i40e_vsi_list *vsi_list;
8305         enum i40e_status_code ret;
8306         int i;
8307         uint32_t val;
8308
8309         /* Use the FW API if FW >= v4.4 */
8310         if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
8311               (hw->aq.fw_maj_ver >= 5))) {
8312                 PMD_INIT_LOG(ERR, "FW < v4.4, cannot use FW LLDP API"
8313                                   " to configure DCB");
8314                 return I40E_ERR_FIRMWARE_API_VERSION;
8315         }
8316
8317         /* Check if need reconfiguration */
8318         if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
8319                 PMD_INIT_LOG(ERR, "No change in DCB config required.");
8320                 return I40E_SUCCESS;
8321         }
8322
8323         /* Copy the new config to the current config */
8324         *old_cfg = *new_cfg;
8325         old_cfg->etsrec = old_cfg->etscfg;
8326         ret = i40e_set_dcb_config(hw);
8327         if (ret) {
8328                 PMD_INIT_LOG(ERR,
8329                          "Set DCB Config failed, err %s aq_err %s",
8330                          i40e_stat_str(hw, ret),
8331                          i40e_aq_str(hw, hw->aq.asq_last_status));
8332                 return ret;
8333         }
8334         /* set receive Arbiter to RR mode and ETS scheme by default */
8335         for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
8336                 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
8337                 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
8338                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
8339                          I40E_PRTDCB_RETSTCC_ETSTC_MASK);
8340                 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
8341                         I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
8342                          I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
8343                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
8344                          I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
8345                 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
8346                          I40E_PRTDCB_RETSTCC_ETSTC_MASK;
8347                 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
8348         }
8349         /* get local mib to check whether it is configured correctly */
8350         /* IEEE mode */
8351         hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
8352         /* Get Local DCB Config */
8353         i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
8354                                      &hw->local_dcbx_config);
8355
8356         /* Update each VSI */
8357         i40e_vsi_config_tc(main_vsi, tc_map);
8358         if (main_vsi->veb) {
8359                 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
8360                         /* Besides the main VSI, only enable the
8361                          * default TC for other VSIs
8362                          */
8363                         ret = i40e_vsi_config_tc(vsi_list->vsi,
8364                                                 I40E_DEFAULT_TCMAP);
8365                         if (ret)
8366                                 PMD_INIT_LOG(WARNING,
8367                                          "Failed configuring TC for VSI seid=%d",
8368                                          vsi_list->vsi->seid);
8369                         /* continue */
8370                 }
8371         }
8372         return I40E_SUCCESS;
8373 }
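
/*
 * Sketch of the read-modify-write pattern applied to each
 * I40E_PRTDCB_RETSTCC register above: clear a field with its mask,
 * then OR in the new value shifted into place and re-masked.
 * Illustrative only; the loop above open-codes this for three fields.
 */
static inline uint32_t
i40e_set_reg_field_sketch(uint32_t reg, uint32_t mask, uint32_t shift,
                          uint32_t value)
{
        reg &= ~mask;
        reg |= (value << shift) & mask;
        return reg;
}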
8374
8375 /*
8376  * i40e_dcb_init_configure - initial dcb config
8377  * @dev: device being configured
8378  * @sw_dcb: indicate whether dcb is sw configured or hw offload
8379  *
8380  * Returns 0 on success, negative value on failure
8381  */
8382 static int
8383 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
8384 {
8385         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8386         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8387         int ret = 0;
8388
8389         if ((pf->flags & I40E_FLAG_DCB) == 0) {
8390                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
8391                 return -ENOTSUP;
8392         }
8393
8394         /* DCB initialization:
8395          * Update DCB configuration from the Firmware and configure
8396          * LLDP MIB change event.
8397          */
8398         if (sw_dcb == TRUE) {
8399                 ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
8400                 if (ret != I40E_SUCCESS)
8401                         PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
8402
8403                 ret = i40e_init_dcb(hw);
8404                 /* With sw_dcb the LLDP agent is stopped, so i40e_init_dcb
8405                  * is expected to fail with an I40E_AQ_RC_EPERM adminq
8406                  * status.
8407                  */
8408                 if (ret != I40E_SUCCESS &&
8409                     hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
8410                         memset(&hw->local_dcbx_config, 0,
8411                                 sizeof(struct i40e_dcbx_config));
8412                         /* set dcb default configuration */
8413                         hw->local_dcbx_config.etscfg.willing = 0;
8414                         hw->local_dcbx_config.etscfg.maxtcs = 0;
8415                         hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
8416                         hw->local_dcbx_config.etscfg.tsatable[0] =
8417                                                 I40E_IEEE_TSA_ETS;
8418                         hw->local_dcbx_config.etsrec =
8419                                 hw->local_dcbx_config.etscfg;
8420                         hw->local_dcbx_config.pfc.willing = 0;
8421                         hw->local_dcbx_config.pfc.pfccap =
8422                                                 I40E_MAX_TRAFFIC_CLASS;
8423                         /* FW needs one App to configure HW */
8424                         hw->local_dcbx_config.numapps = 1;
8425                         hw->local_dcbx_config.app[0].selector =
8426                                                 I40E_APP_SEL_ETHTYPE;
8427                         hw->local_dcbx_config.app[0].priority = 3;
8428                         hw->local_dcbx_config.app[0].protocolid =
8429                                                 I40E_APP_PROTOID_FCOE;
8430                         ret = i40e_set_dcb_config(hw);
8431                         if (ret) {
8432                                 PMD_INIT_LOG(ERR, "default DCB config failed,"
8433                                         " err = %d, aq_err = %d.", ret,
8434                                           hw->aq.asq_last_status);
8435                                 return -ENOSYS;
8436                         }
8437                 } else {
8438                         PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
8439                                           " aq_err = %d.", ret,
8440                                           hw->aq.asq_last_status);
8441                         return -ENOTSUP;
8442                 }
8443         } else {
8444                 ret = i40e_aq_start_lldp(hw, NULL);
8445                 if (ret != I40E_SUCCESS)
8446                         PMD_INIT_LOG(DEBUG, "Failed to start lldp");
8447
8448                 ret = i40e_init_dcb(hw);
8449                 if (!ret) {
8450                         if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
8451                                 PMD_INIT_LOG(ERR, "HW doesn't support"
8452                                                   " DCBX offload.");
8453                                 return -ENOTSUP;
8454                         }
8455                 } else {
8456                         PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
8457                                           " aq_err = %d.", ret,
8458                                           hw->aq.asq_last_status);
8459                         return -ENOTSUP;
8460                 }
8461         }
8462         return 0;
8463 }
8464
8465 /*
8466  * i40e_dcb_setup - setup dcb related config
8467  * @dev: device being configured
8468  *
8469  * Returns 0 on success, negative value on failure
8470  */
8471 static int
8472 i40e_dcb_setup(struct rte_eth_dev *dev)
8473 {
8474         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8475         struct i40e_dcbx_config dcb_cfg;
8476         uint8_t tc_map = 0;
8477         int ret = 0;
8478
8479         if ((pf->flags & I40E_FLAG_DCB) == 0) {
8480                 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
8481                 return -ENOTSUP;
8482         }
8483
8484         if (pf->vf_num != 0 ||
8485             (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
8486                 PMD_INIT_LOG(DEBUG, "DCB only works on the main VSI.");
8487
8488         ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
8489         if (ret) {
8490                 PMD_INIT_LOG(ERR, "invalid DCB config");
8491                 return -EINVAL;
8492         }
8493         ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
8494         if (ret) {
8495                 PMD_INIT_LOG(ERR, "DCB SW configuration failed");
8496                 return -ENOSYS;
8497         }
8498
8499         return 0;
8500 }
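
/*
 * Hedged usage sketch (application side, not part of the driver): a
 * minimal rte_eth_conf that makes i40e_dcb_setup() program four
 * traffic classes with PFC. The port_id, queue counts and the user
 * priority to TC mapping are assumptions.
 */
static inline int
i40e_dcb_config_usage_sketch(uint8_t port_id)
{
        struct rte_eth_conf conf;
        int i;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
        conf.dcb_capability_en = ETH_DCB_PFC_SUPPORT;
        conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
        /* map user priorities 0..7 onto TCs 0..3 round-robin */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % 4;

        /* four Rx/Tx queues so every TC maps to at least one queue */
        return rte_eth_dev_configure(port_id, 4, 4, &conf);
}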
8501
8502 static int
8503 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
8504                       struct rte_eth_dcb_info *dcb_info)
8505 {
8506         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8507         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8508         struct i40e_vsi *vsi = pf->main_vsi;
8509         struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
8510         uint16_t bsf, tc_mapping;
8511         int i;
8512
8513         if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
8514                 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
8515         else
8516                 dcb_info->nb_tcs = 1;
8517         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
8518                 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
8519         for (i = 0; i < dcb_info->nb_tcs; i++)
8520                 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
8521
8522         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8523                 if (vsi->enabled_tc & (1 << i)) {
8524                         tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
8525                         /* only the main VSI supports multiple TCs */
8526                         dcb_info->tc_queue.tc_rxq[0][i].base =
8527                                 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
8528                                 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
8529                         dcb_info->tc_queue.tc_txq[0][i].base =
8530                                 dcb_info->tc_queue.tc_rxq[0][i].base;
8531                         bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
8532                                 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
8533                         dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf;
8534                         dcb_info->tc_queue.tc_txq[0][i].nb_queue =
8535                                 dcb_info->tc_queue.tc_rxq[0][i].nb_queue;
8536                 }
8537         }
8538
8539         return 0;
8540 }
8541
8542 static int
8543 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
8544 {
8545         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
8546         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8547         uint16_t interval =
8548                 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
8549         uint16_t msix_intr;
8550
8551         msix_intr = intr_handle->intr_vec[queue_id];
8552         if (msix_intr == I40E_MISC_VEC_ID)
8553                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
8554                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
8555                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
8556                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
8557                                (interval <<
8558                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
8559         else
8560                 I40E_WRITE_REG(hw,
8561                                I40E_PFINT_DYN_CTLN(msix_intr -
8562                                                    I40E_RX_VEC_START),
8563                                I40E_PFINT_DYN_CTLN_INTENA_MASK |
8564                                I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
8565                                (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
8566                                (interval <<
8567                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
8568
8569         I40E_WRITE_FLUSH(hw);
8570         rte_intr_enable(&dev->pci_dev->intr_handle);
8571
8572         return 0;
8573 }
8574
8575 static int
8576 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
8577 {
8578         struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
8579         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8580         uint16_t msix_intr;
8581
8582         msix_intr = intr_handle->intr_vec[queue_id];
8583         if (msix_intr == I40E_MISC_VEC_ID)
8584                 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
8585         else
8586                 I40E_WRITE_REG(hw,
8587                                I40E_PFINT_DYN_CTLN(msix_intr -
8588                                                    I40E_RX_VEC_START),
8589                                0);
8590         I40E_WRITE_FLUSH(hw);
8591
8592         return 0;
8593 }
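
/*
 * Hedged usage sketch (application side): the two callbacks above back
 * rte_eth_dev_rx_intr_enable/disable(), which an event-driven loop
 * calls around its wait on the interrupt file descriptor. port_id,
 * queue_id and the wakeup policy are assumptions.
 */
static inline void
i40e_rx_intr_usage_sketch(uint8_t port_id, uint16_t queue_id)
{
        /* arm the queue interrupt before sleeping */
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        /* ... block on the epoll/event fd until the NIC signals ... */
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
        /* then poll the queue with rte_eth_rx_burst() until drained */
}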